code: string, lengths 81–54k
code_codestyle: int64, range 0–721
style_context: string, lengths 91–41.9k
style_context_codestyle: int64, range 0–699
label: int64, range 0–1
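Each row pairs a code sample with a style-context sample: two source strings, the integer style-class id of each, and a binary label. Below is a minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face datasets library; the dataset path is a hypothetical placeholder, not the real identifier.

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder path (assumption);
# substitute the actual dataset ID.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
# The three integer fields: style class of `code`, style class of
# `style_context`, and the pair label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code string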
"""simple docstring""" def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [0 for i in range(r + 1 )] # nc0 = 1 __SCREAMING_SNAKE_CASE = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. __SCREAMING_SNAKE_CASE = min(UpperCAmelCase__ , UpperCAmelCase__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
code_codestyle: 690
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 690
label: 1
"""simple docstring""" import os import sys lowerCAmelCase__ =os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase__ =[ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Dict: return AutoConfig.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Dict: return AutoTokenizer.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ ) @add_start_docstrings(AutoModel.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[Any]: return AutoModel.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> str: return AutoModelForCausalLM.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[str]: return AutoModelForMaskedLM.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[str]: return AutoModelForSequenceClassification.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def _a ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> int: return AutoModelForQuestionAnswering.from_pretrained(*UpperCAmelCase__ , **UpperCAmelCase__ )
code_codestyle: 690
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A__( unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = CLIPConfig() # Create a dummy config file with image_proceesor_type __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict() config_dict.pop('''image_processor_type''' ) __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # make sure private variable is not incorrectly saved __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) 
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> str: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def _a ( self : Dict ) -> Dict: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _a ( self : int ) -> Any: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _a ( self : int ) -> List[Any]: """simple docstring""" class A__( __magic_name__ ): lowerCAmelCase = True try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
style_context_codestyle: 690
label: 1
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class A__( unittest.TestCase ): def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=30 , __SCREAMING_SNAKE_CASE : int=4_00 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Dict=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=1 / 2_55 , __SCREAMING_SNAKE_CASE : Optional[int]=True , ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_pad def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _a ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=False ) -> int: """simple docstring""" if not batched: __SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: __SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * h / w ) __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] elif w > h: __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] __SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * w / h ) else: __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0] __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = YolosImageProcessor if is_vision_available() else None def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = YolosImageProcessingTester(self ) @property def _a ( self : List[Any] ) -> Optional[Any]: 
"""simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) def _a ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : Dict ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : str ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) __SCREAMING_SNAKE_CASE = self.image_processing_class(do_resize=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , do_rescale=__SCREAMING_SNAKE_CASE ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors __SCREAMING_SNAKE_CASE = image_processing_a.pad(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE = image_processing_a(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertTrue( torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) ) @slow def _a ( self : List[str] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {'''image_id''': 3_97_69, '''annotations''': target} # encode them __SCREAMING_SNAKE_CASE = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' ) __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} __SCREAMING_SNAKE_CASE = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __SCREAMING_SNAKE_CASE = YolosImageProcessor(format='''coco_panoptic''' ) __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , 
__SCREAMING_SNAKE_CASE ) ) # verify masks __SCREAMING_SNAKE_CASE = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __SCREAMING_SNAKE_CASE ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
code_codestyle: 690
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
style_context_codestyle: 690
label: 1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase__ =logging.getLogger(__name__) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: return (preds == labels).mean() @dataclass class A__: lowerCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCAmelCase = field( default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase = field( default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCAmelCase = field( default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class A__: lowerCAmelCase = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) lowerCAmelCase = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) lowerCAmelCase = field( default=1_28 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCAmelCase = field( default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def _a ( ) -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) try: __SCREAMING_SNAKE_CASE = processors[data_args.task_name]() __SCREAMING_SNAKE_CASE = processor.get_labels() __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __SCREAMING_SNAKE_CASE = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets __SCREAMING_SNAKE_CASE = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __SCREAMING_SNAKE_CASE = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase__ ) -> Dict: __SCREAMING_SNAKE_CASE = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase__ , p.label_ids )} # Data collator __SCREAMING_SNAKE_CASE = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __SCREAMING_SNAKE_CASE = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __SCREAMING_SNAKE_CASE = {} if 
training_args.do_eval: logger.info('''*** Evaluate ***''' ) __SCREAMING_SNAKE_CASE = trainer.evaluate() __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(UpperCAmelCase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(UpperCAmelCase__ ) return results def _a ( UpperCAmelCase__ ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
code_codestyle: 690
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
style_context_codestyle: 690
label: 1
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
code_codestyle: 690
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 690
label: 1
"""simple docstring""" import json import os import torch from diffusers import UNetaDModel os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) def _a ( UpperCAmelCase__ ) -> Any: if hor == 1_28: __SCREAMING_SNAKE_CASE = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''') __SCREAMING_SNAKE_CASE = (32, 1_28, 2_56) __SCREAMING_SNAKE_CASE = ('''UpResnetBlock1D''', '''UpResnetBlock1D''') elif hor == 32: __SCREAMING_SNAKE_CASE = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''') __SCREAMING_SNAKE_CASE = (32, 64, 1_28, 2_56) __SCREAMING_SNAKE_CASE = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''') __SCREAMING_SNAKE_CASE = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" ) __SCREAMING_SNAKE_CASE = model.state_dict() __SCREAMING_SNAKE_CASE = { '''down_block_types''': down_block_types, '''block_out_channels''': block_out_channels, '''up_block_types''': up_block_types, '''layers_per_block''': 1, '''use_timestep_embedding''': True, '''out_block_type''': '''OutConv1DBlock''', '''norm_num_groups''': 8, '''downsample_each_block''': False, '''in_channels''': 14, '''out_channels''': 14, '''extra_in_channels''': 0, '''time_embedding_type''': '''positional''', '''flip_sin_to_cos''': False, '''freq_shift''': 1, '''sample_size''': 6_55_36, '''mid_block_type''': '''MidResTemporalBlock1D''', '''act_fn''': '''mish''', } __SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCAmelCase__ ) print(f"""length of state dict: {len(state_dict.keys() )}""" ) print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ ) hf_value_function.load_state_dict(UpperCAmelCase__ ) torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" ) with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , '''w''' ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def _a ( ) -> List[Any]: __SCREAMING_SNAKE_CASE = { '''in_channels''': 14, '''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''), '''up_block_types''': (), '''out_block_type''': '''ValueFunction''', '''mid_block_type''': '''ValueFunctionMidBlock1D''', '''block_out_channels''': (32, 64, 1_28, 2_56), '''layers_per_block''': 1, '''downsample_each_block''': True, '''sample_size''': 6_55_36, '''out_channels''': 14, '''extra_in_channels''': 0, '''time_embedding_type''': '''positional''', '''use_timestep_embedding''': True, '''flip_sin_to_cos''': False, '''freq_shift''': 1, '''norm_num_groups''': 8, '''act_fn''': '''mish''', } __SCREAMING_SNAKE_CASE = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' ) __SCREAMING_SNAKE_CASE = model __SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCAmelCase__ ) print(f"""length of state dict: {len(state_dict.keys() )}""" ) print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __SCREAMING_SNAKE_CASE = 
state_dict.pop(UpperCAmelCase__ ) hf_value_function.load_state_dict(UpperCAmelCase__ ) torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' ) with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": unet(32) # unet(128) value_function()
code_codestyle: 690
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
style_context_codestyle: 690
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase__ ={ "configuration_rag": ["RagConfig"], "retrieval_rag": ["RagRetriever"], "tokenization_rag": ["RagTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 690
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
690
1
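The audio feature extractor a few lines up ends by squeezing a dB-scaled log-mel spectrogram into [-1, 1]. A minimal numpy sketch of that normalization step, assuming the 20 dB shift and 40 dB window used in the sample (the random array stands in for a real spectrogram):

import numpy as np

# Hypothetical dB-scaled log-mel spectrogram, shape (num_mel_bins, num_frames).
log_spec = np.random.uniform(-80.0, 0.0, size=(128, 100))

# Drop the last frame, shift by 20 dB, then clip a 40 dB window into [-1, 1].
log_spec = log_spec[:, :-1] - 20.0
normalized = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
assert -1.0 <= normalized.min() and normalized.max() <= 1.0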
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ ={ "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["MobileViTFeatureExtractor"] lowerCAmelCase__ =["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
700
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _a ( UpperCAmelCase__ ) -> dict[str, str]: __SCREAMING_SNAKE_CASE = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __SCREAMING_SNAKE_CASE = remove_duplicates(key.upper() ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # First fill cipher with key characters __SCREAMING_SNAKE_CASE = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 26 ): __SCREAMING_SNAKE_CASE = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __SCREAMING_SNAKE_CASE = alphabet[i - offset] __SCREAMING_SNAKE_CASE = char return cipher_alphabet def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = input('''Enter message to encode or decode: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Enter keyword: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __SCREAMING_SNAKE_CASE = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __SCREAMING_SNAKE_CASE = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
690
0
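The keyword-cipher sample above is heavily obfuscated; a readable sketch of the same construction, with an illustrative key and message (the names are mine, not the sample's):

def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Deduplicate the key while preserving order.
    key = "".join(dict.fromkeys(c for c in key.upper() if c.isalpha()))
    offset = len(key)
    # Key letters head the cipher alphabet...
    cipher = {alphabet[i]: ch for i, ch in enumerate(key)}
    # ...then the remaining letters follow, skipping ones already used.
    for i in range(len(key), 26):
        ch = alphabet[i - offset]
        while ch in key:
            offset -= 1
            ch = alphabet[i - offset]
        cipher[alphabet[i]] = ch
    return cipher

cipher = create_cipher_map("marvin")
print("".join(cipher.get(c, c) for c in "HELLO WORLD"))  # CIGGK WKPGV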
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ =logging.get_logger(__name__) class A__( _snake_case ): lowerCAmelCase = ['''pixel_values'''] def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_55 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = size if size is not None else {'''height''': 3_84, '''width''': 3_84} __SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = resample __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD __SCREAMING_SNAKE_CASE = do_convert_rgb def _a ( self : int , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) __SCREAMING_SNAKE_CASE = (size['''height'''], size['''width''']) return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[int, float] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Optional[Any]: """simple docstring""" return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : int , ) -> PIL.Image.Image: """simple docstring""" __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __SCREAMING_SNAKE_CASE = size if size is not None else self.size __SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __SCREAMING_SNAKE_CASE = [convert_to_rgb(lowerCAmelCase__ ) for image in images] # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE = [to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] __SCREAMING_SNAKE_CASE = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCAmelCase__ ) return encoded_outputs
701
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__: def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[Any] ) -> Any: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _a ( self : Dict ) -> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]={} ): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = TFRegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def 
_a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : List[Any] ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
690
0
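The image processor at the start of this row chains resize, rescale, normalize, and a channels-first transpose. A compact numpy sketch of the rescale/normalize stages, assuming the OpenAI CLIP mean/std constants the sample imports:

import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

# Hypothetical uint8 RGB image, shape (height, width, channels).
image = np.random.randint(0, 256, size=(384, 384, 3), dtype=np.uint8)

pixels = image.astype(np.float32) * (1 / 255)            # rescale
pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # per-channel normalize
pixels = pixels.transpose(2, 0, 1)                       # ChannelDimension.FIRST
print(pixels.shape)  # (3, 384, 384)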
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase__ =TypeVar("T") class A__( Generic[T] ): def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = data __SCREAMING_SNAKE_CASE = None def __str__( self : Any ) -> str: """simple docstring""" return f"""{self.data}""" class A__( Generic[T] ): def __init__( self : str ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = None def __iter__( self : Union[str, Any] ) -> Iterator[T]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.top while node: yield node.data __SCREAMING_SNAKE_CASE = node.next def __str__( self : List[str] ) -> str: """simple docstring""" return "->".join([str(UpperCAmelCase_ ) for item in self] ) def __len__( self : str ) -> int: """simple docstring""" return len(tuple(iter(self ) ) ) def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.top is None def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Node(UpperCAmelCase_ ) if not self.is_empty(): __SCREAMING_SNAKE_CASE = self.top __SCREAMING_SNAKE_CASE = node def _a ( self : Dict ) -> T: """simple docstring""" if self.is_empty(): raise IndexError('''pop from empty stack''' ) assert isinstance(self.top , UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = self.top __SCREAMING_SNAKE_CASE = self.top.next return pop_node.data def _a ( self : Optional[int] ) -> T: """simple docstring""" if self.is_empty(): raise IndexError('''peek from empty stack''' ) assert self.top is not None return self.top.data def _a ( self : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = None if __name__ == "__main__": from doctest import testmod testmod()
702
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = XLMRobertaTokenizer lowerCAmelCase = XLMRobertaTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<pad>''' __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_02 ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', 
'''<unk>''', '''.''', ] , ) def _a ( self : int ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) @cached_property def _a ( self : Union[str, Any] ) -> List[str]: """simple 
docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE ) pickle.loads(__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.''' __SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''Hello World!''' __SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __SCREAMING_SNAKE_CASE = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
690
0
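The obfuscation in the stack sample above hides that it is an ordinary singly linked stack with O(1) push/pop/peek; a small de-obfuscated usage sketch:

from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def push(self, item: T) -> None:
        node = Node(item)
        node.next = self.top  # new node points at the old top
        self.top = node

    def pop(self) -> T:
        if self.top is None:
            raise IndexError("pop from empty stack")
        node, self.top = self.top, self.top.next
        return node.data


stack: Stack[int] = Stack()
stack.push(1)
stack.push(2)
assert stack.pop() == 2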
"""simple docstring""" from math import isqrt def _a ( UpperCAmelCase__ ) -> list[int]: __SCREAMING_SNAKE_CASE = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = False return [i for i in range(2 , UpperCAmelCase__ ) if is_prime[i]] def _a ( UpperCAmelCase__ = 10**8 ) -> int: __SCREAMING_SNAKE_CASE = calculate_prime_numbers(max_number // 2 ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'''{solution() = }''')
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
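The semiprime counter above pairs a prime sieve with a two-pointer sweep over the prime list. A self-contained sketch with readable names and a small bound (the inner guard against `right` crossing `left` is my addition):

from math import isqrt


def primes_below(n: int) -> list[int]:
    sieve = [True] * n
    for i in range(2, isqrt(n - 1) + 1):
        if sieve[i]:
            for j in range(i * i, n, i):
                sieve[j] = False
    return [i for i in range(2, n) if sieve[i]]


def count_semiprimes(max_number: int) -> int:
    primes = primes_below(max_number // 2)
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        # Shrink `right` until primes[left] * primes[right] fits under the bound.
        while left <= right and primes[left] * primes[right] >= max_number:
            right -= 1
        if right < left:
            break
        count += right - left + 1
        left += 1
    return count


print(count_semiprimes(30))  # 10 semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26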
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = None lowerCAmelCase = BloomTokenizerFast lowerCAmelCase = BloomTokenizerFast lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = '''tokenizer_file''' lowerCAmelCase = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def _a ( self : int ) -> int: """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] __SCREAMING_SNAKE_CASE = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]] __SCREAMING_SNAKE_CASE = tokenizer.batch_encode_plus(UpperCamelCase_ )['input_ids'] self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=6 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __SCREAMING_SNAKE_CASE = 'This is a simple input' __SCREAMING_SNAKE_CASE = ['This is a simple input 1', 'This is a simple input 2'] __SCREAMING_SNAKE_CASE = ('This is a simple input', 'This is a pair') __SCREAMING_SNAKE_CASE = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.batch_encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.encode(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.batch_encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) __SCREAMING_SNAKE_CASE = None # Hotfixing padding = None self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Pair input 
self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' , ) def _a ( self : int ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = next(iter(UpperCamelCase_ ) )['premise'] # pick up one data __SCREAMING_SNAKE_CASE = list(sample_data.values() ) __SCREAMING_SNAKE_CASE = list(map(tokenizer.encode , UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = [tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) for x in output_tokens] self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _a ( self : str ) -> Optional[Any]: """simple docstring""" self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
704
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) if "model" in sd.keys(): __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model'''] # pop unnecessary weights __SCREAMING_SNAKE_CASE = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __SCREAMING_SNAKE_CASE = sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __SCREAMING_SNAKE_CASE = sd[key] # We split QKV in separate Q,K,V __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) __SCREAMING_SNAKE_CASE = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(UpperCAmelCase__ , depth // 3 , dim=0 ) __SCREAMING_SNAKE_CASE = q __SCREAMING_SNAKE_CASE = k __SCREAMING_SNAKE_CASE = v del sd[key] return sd @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_checkpoint(UpperCAmelCase__ ) if config is not None: __SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = OPTConfig() __SCREAMING_SNAKE_CASE = OPTModel(UpperCAmelCase__ ).half().eval() model.load_state_dict(UpperCAmelCase__ ) # Check results Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") lowerCAmelCase__ =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
690
0
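The OPT checkpoint-conversion sample just above splits a fused QKV projection into separate Q, K and V weights along dim 0. A minimal torch sketch of that split (the hidden size is illustrative):

import torch

hidden = 8
# Hypothetical fused projection with Q, K and V weights stacked row-wise.
qkv_weight = torch.randn(3 * hidden, hidden)

depth = qkv_weight.shape[0]
assert depth % 3 == 0
q, k, v = torch.split(qkv_weight, depth // 3, dim=0)
print(q.shape, k.shape, v.shape)  # each torch.Size([8, 8])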
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = '''gelu''' def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : List[Any]=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=20 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : Dict=0 , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, inputs_dict def _a ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__lowerCamelCase ).get_decoder() __SCREAMING_SNAKE_CASE = inputs_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = 
inputs_dict["attention_mask"][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["head_mask"] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase ) __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Union[str, Any]: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_A , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( lowercase__ , lowercase__ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> Tuple: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase ) def _a ( self : List[str] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] lowerCAmelCase = '''facebook/mbart-large-en-ro''' @cached_property def _a ( self : Optional[int] ) -> Dict: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__lowerCamelCase ) self.assertListEqual(self.expected_text , __lowerCamelCase ) def _a ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__lowerCamelCase , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) return generated_words @slow def _a ( self : Optional[int] ) -> int: """simple docstring""" self._assert_generated_batch_equal_expected()
705
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__( __magic_name__ ): lowerCAmelCase = '''naver-clova-ix/donut-base-finetuned-docvqa''' lowerCAmelCase = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) lowerCAmelCase = '''document_qa''' lowerCAmelCase = AutoProcessor lowerCAmelCase = VisionEncoderDecoderModel lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''text'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : "Image" , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __SCREAMING_SNAKE_CASE = task_prompt.replace('''{user_input}''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE = self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__SCREAMING_SNAKE_CASE , ).sequences def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __SCREAMING_SNAKE_CASE = re.sub(r'''<.*?>''' , '''''' , __SCREAMING_SNAKE_CASE , count=1 ).strip() 
# remove first task start token __SCREAMING_SNAKE_CASE = self.pre_processor.tokenajson(__SCREAMING_SNAKE_CASE ) return sequence["answer"]
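For orientation, the tool above is a thin wrapper around the published Donut document-VQA recipe; a minimal sketch of the same flow with readable names (the image path and the question are placeholders of mine, the model id and processor calls come from the sample itself):

from PIL import Image
import re
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = Image.open("invoice.png").convert("RGB")  # any document scan
prompt = "<s_docvqa><s_question>What is the invoice total?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values

sequences = model.generate(
    pixel_values,
    decoder_input_ids=decoder_input_ids,
    max_length=model.decoder.config.max_position_embeddings,
)
seq = processor.batch_decode(sequences)[0]
seq = seq.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "")
seq = re.sub(r"<.*?>", "", seq, count=1).strip()  # drop the first task start token
print(processor.token2json(seq)["answer"])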
690
0
"""simple docstring""" import functools from typing import Any def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ ) -> bool: # Validation if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or len(lowerCamelCase_ ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or not all( isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = 'WORD_KEEPER' for word in words: __SCREAMING_SNAKE_CASE = trie for c in word: if c not in trie_node: __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = trie_node[c] __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = len(lowerCamelCase_ ) # Dynamic programming method @functools.cache def is_breakable(UpperCAmelCase__ ) -> bool: if index == len_string: return True __SCREAMING_SNAKE_CASE = trie for i in range(lowerCamelCase_ , lowerCamelCase_ ): __SCREAMING_SNAKE_CASE = trie_node.get(string[i] , lowerCamelCase_ ) if trie_node is None: return False if trie_node.get(lowerCamelCase_ , lowerCamelCase_ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
706
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__( unittest.TestCase ): @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _a ( self : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A__( unittest.TestCase ): def _a ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''google/ncsnpp-celebahq-256''' __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __SCREAMING_SNAKE_CASE = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
690
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class A__( UpperCAmelCase_ ): lowerCAmelCase = 'speech_to_text' lowerCAmelCase = ['past_key_values'] lowerCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[int]=1_00_00 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : List[Any]=20_48 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : Tuple=6 , __SCREAMING_SNAKE_CASE : List[Any]=20_48 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]="relu" , __SCREAMING_SNAKE_CASE : List[Any]=2_56 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=60_00 , __SCREAMING_SNAKE_CASE : Optional[int]=10_24 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Any=(5, 5) , __SCREAMING_SNAKE_CASE : List[str]=10_24 , __SCREAMING_SNAKE_CASE : str=80 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = decoder_layerdrop __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True __SCREAMING_SNAKE_CASE = max_source_positions __SCREAMING_SNAKE_CASE = max_target_positions __SCREAMING_SNAKE_CASE = num_conv_layers __SCREAMING_SNAKE_CASE = list(_lowercase ) __SCREAMING_SNAKE_CASE = conv_channels __SCREAMING_SNAKE_CASE = input_feat_per_channel __SCREAMING_SNAKE_CASE = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ''' f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """ f"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) super().__init__( pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
707
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "spiece.model"} lowerCAmelCase__ ={ "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } lowerCAmelCase__ ={ "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __SCREAMING_SNAKE_CASE = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __SCREAMING_SNAKE_CASE = '''<|endoftext|>''' if eos_token is None else eos_token __SCREAMING_SNAKE_CASE = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token else: __SCREAMING_SNAKE_CASE = '''<pad>''' if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace 
normalization in input texts # fmt : off __SCREAMING_SNAKE_CASE = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __SCREAMING_SNAKE_CASE = re.compile( f"""[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" ) def __getstate__( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.sp_model ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub('''''' , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __SCREAMING_SNAKE_CASE = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __SCREAMING_SNAKE_CASE = unicodedata.normalize('''NFC''' , __SCREAMING_SNAKE_CASE ) return text def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" return out_string def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = '''''' __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Union[str, Any] ) -> Dict[str, int]: """simple docstring""" __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: """simple docstring""" return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __SCREAMING_SNAKE_CASE = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__SCREAMING_SNAKE_CASE ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=__SCREAMING_SNAKE_CASE )
690
0
from PIL import Image def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Image: def brightness(UpperCAmelCase__ ) -> float: return 1_28 + level + (c - 1_28) if not -255.0 <= level <= 255.0: raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' ) return img.point(brightness ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change brightness to 100 lowerCAmelCase__ =change_brightness(img, 100) brigt_img.save("image_data/lena_brightness.png", format="png")
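A self-contained variant of the same idea that runs without the `image_data/lena.jpg` asset, using a synthetic grey test image (the image size and values are mine):

from PIL import Image

def change_brightness(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # Image.point maps every pixel value c through the callable below.
    return img.point(lambda c: 128 + level + (c - 128))

demo = Image.new("L", (4, 4), color=100)
print(change_brightness(demo, 50).getpixel((0, 0)))  # 150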
708
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase__ ={"UserAgent": UserAgent().random} def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = script.contents[0] __SCREAMING_SNAKE_CASE = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A__: def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = f"""https://www.instagram.com/{username}/""" __SCREAMING_SNAKE_CASE = self.get_json() def _a ( self : List[Any] ) -> dict: """simple docstring""" __SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__SCREAMING_SNAKE_CASE ).text __SCREAMING_SNAKE_CASE = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Tuple ) -> str: """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def _a ( self : Tuple ) -> str: """simple docstring""" return self.user_data["username"] @property def _a ( self : List[Any] ) -> str: """simple docstring""" return self.user_data["full_name"] @property def _a ( self : Optional[Any] ) -> str: """simple docstring""" return self.user_data["biography"] @property def _a ( self : List[str] ) -> str: """simple docstring""" return self.user_data["business_email"] @property def _a ( self : Any ) -> str: """simple docstring""" return self.user_data["external_url"] @property def _a ( self : Any ) -> int: """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def _a ( self : Dict ) -> int: """simple docstring""" return self.user_data["edge_follow"]["count"] @property def _a ( self : str ) -> int: """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _a ( self : Union[str, Any] ) -> str: """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def _a ( self : Tuple ) -> bool: """simple docstring""" return self.user_data["is_verified"] @property def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.user_data["is_private"] def _a ( UpperCAmelCase__ = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __SCREAMING_SNAKE_CASE = InstagramUser(UpperCAmelCase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , UpperCAmelCase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ =InstagramUser("github") print(instagram_user) print(F'''{instagram_user.number_of_posts = }''') print(F'''{instagram_user.number_of_followers = }''') print(F'''{instagram_user.number_of_followings = }''') print(F'''{instagram_user.email = }''') print(F'''{instagram_user.website = }''') print(F'''{instagram_user.profile_picture_url = }''') print(F'''{instagram_user.is_verified = }''') print(F'''{instagram_user.is_private = }''')
690
0
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex lowerCAmelCase__ =logging.getLogger(__name__) class A__: '''simple docstring''' def __init__( self : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = False def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" if not self.initialized: __SCREAMING_SNAKE_CASE = RagRetriever( _lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , index=_lowerCAmelCase , init_retrieval=_lowerCAmelCase , ) __SCREAMING_SNAKE_CASE = True def _a ( self : int ) -> List[str]: """simple docstring""" self.retriever.index.init_index() def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.retriever._main_retrieve(_lowerCAmelCase , _lowerCAmelCase ) return doc_ids, retrieved_doc_embeds class A__( __magic_name__ ): '''simple docstring''' def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=None ) -> Dict: """simple docstring""" if index is not None and index.is_initialized() and len(_lowerCAmelCase ) > 0: raise ValueError( '''When using Ray for distributed fine-tuning, ''' '''you\'ll need to provide the paths instead, ''' '''as the dataset and the index are loaded ''' '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' ) super().__init__( _lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , index=_lowerCAmelCase , init_retrieval=_lowerCAmelCase , ) __SCREAMING_SNAKE_CASE = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for worker in self.retrieval_workers ] ) def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" logger.info('''initializing retrieval''' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _a ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
__SCREAMING_SNAKE_CASE = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ray.get(random_worker.retrieve.remote(_lowerCAmelCase , _lowerCAmelCase ) ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._main_retrieve(_lowerCAmelCase , _lowerCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCAmelCase ) @classmethod def _a ( cls : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: """simple docstring""" return super(_lowerCAmelCase , cls ).get_tokenizers(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) @classmethod def _a ( cls : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.pop('''config''' , _lowerCAmelCase ) or RagConfig.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = rag_tokenizer.question_encoder __SCREAMING_SNAKE_CASE = rag_tokenizer.generator if indexed_dataset is not None: __SCREAMING_SNAKE_CASE = '''custom''' __SCREAMING_SNAKE_CASE = CustomHFIndex(config.retrieval_vector_size , _lowerCAmelCase ) else: __SCREAMING_SNAKE_CASE = cls._build_index(_lowerCAmelCase ) return cls( _lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , retrieval_workers=_lowerCAmelCase , index=_lowerCAmelCase , )
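The retrieval workers above are Ray actor handles: calls go through `handle.method.remote(...)` and results come back via `ray.get`. A minimal, generic sketch of that pattern (the actor here is a stand-in for illustration, not the retriever itself):

import ray

@ray.remote
class EchoWorker:
    def ping(self) -> str:
        return "pong"

ray.init(ignore_reinit_error=True)
workers = [EchoWorker.remote() for _ in range(2)]
print(ray.get([w.ping.remote() for w in workers]))  # ['pong', 'pong']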
709
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__( datasets.Metric ): def _a ( self : Any ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]="binary" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="warn" , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = recall_score( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , zero_division=__SCREAMING_SNAKE_CASE , ) return {"recall": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
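The metric defers entirely to scikit-learn, so the first docstring example can be reproduced directly:

from sklearn.metrics import recall_score

print(recall_score([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]))  # 0.6666666666666666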
690
0
"""simple docstring""" from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__: def __init__( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : str=30 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : Dict=37 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=None , ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE = num_patches + 1 def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Tuple ) -> List[str]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFViTModel(config=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = 
model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. __SCREAMING_SNAKE_CASE = self.image_size // 2 __SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size] __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.type_sequence_label_size __SCREAMING_SNAKE_CASE = TFViTForImageClassification(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. __SCREAMING_SNAKE_CASE = self.image_size // 2 __SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size] __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = TFViTForImageClassification(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFViTModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" pass def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or 
isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def _a ( self : str ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(UpperCamelCase_ ) def _a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : str ) -> List[str]: """simple docstring""" return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
710
"""simple docstring""" def _a ( UpperCAmelCase__ = 10**9 ) -> int: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function lowerCAmelCase__ =1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s lowerCAmelCase__ =3E8 # unit of c : m * s^-1 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: if (force, area, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if force < 0: raise ValueError('''Magnitude of force can not be negative''' ) if distance < 0: raise ValueError('''Distance can not be negative''' ) if area < 0: raise ValueError('''Area can not be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_40 * (distance) ** 4 ) return {"force": force} elif area == 0: __SCREAMING_SNAKE_CASE = (2_40 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __SCREAMING_SNAKE_CASE = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('''One and only one argument must be 0''' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
711
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase__ =pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase__ =dataset.iloc[:, 1:2].values lowerCAmelCase__ =dataset.iloc[:, 2].values lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase__ =PolynomialFeatures(degree=4) lowerCAmelCase__ =poly_reg.fit_transform(X) lowerCAmelCase__ =LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> List[Any]: plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' ) plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
690
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = """gelu""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : int=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=37 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=20 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(_a , _a , _a ) return config, inputs_dict def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=_a ).get_decoder() __SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = 
inputs_dict["""attention_mask"""][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a ) __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Any: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_a ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ """ UN Chief Says There Is No Military Solution in Syria""", ] lowerCAmelCase = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] lowerCAmelCase = """facebook/mbart-large-en-ro""" @cached_property def _a ( self : int ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Dict , **__SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**_a ) self.assertListEqual(self.expected_text , _a ) def _a ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **_a , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(_a , skip_special_tokens=_a ) return generated_words @slow def _a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" self._assert_generated_batch_equal_expected()
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = '''gelu''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : List[str]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() __SCREAMING_SNAKE_CASE = 
inputs_dict['''input_ids'''] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''attention_mask'''][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''head_mask'''] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Optional[int]: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] lowerCAmelCase = '''facebook/mbart-large-en-ro''' @cached_property def _a ( self : Optional[int] ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self._assert_generated_batch_equal_expected()
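The slow test above reduces to a tokenizer -> generate -> batch_decode round trip. A minimal sketch of that trip with the same checkpoint (note the dump's mangled class name TFAutoModelForSeqaSeqLM corresponds to the real TFAutoModelForSeq2SeqLM; the weights must be downloadable):

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")

batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))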
690
0
"""simple docstring""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = downstream_dict['''projector.weight'''] __SCREAMING_SNAKE_CASE = downstream_dict['''projector.bias'''] __SCREAMING_SNAKE_CASE = downstream_dict['''model.post_net.linear.weight'''] __SCREAMING_SNAKE_CASE = downstream_dict['''model.post_net.linear.bias'''] return model def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict: __SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = downstream_dict['''model.linear.weight'''] __SCREAMING_SNAKE_CASE = downstream_dict['''model.linear.bias'''] return model def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int: __SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = downstream_dict['''connector.weight'''] __SCREAMING_SNAKE_CASE = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __SCREAMING_SNAKE_CASE = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] __SCREAMING_SNAKE_CASE = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] __SCREAMING_SNAKE_CASE = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] __SCREAMING_SNAKE_CASE = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] __SCREAMING_SNAKE_CASE = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] __SCREAMING_SNAKE_CASE = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] __SCREAMING_SNAKE_CASE = downstream_dict['''objective.W'''] return model @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]: __SCREAMING_SNAKE_CASE = torch.load(__UpperCAmelCase , map_location='''cpu''' ) __SCREAMING_SNAKE_CASE = checkpoint['''Downstream'''] __SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained( __UpperCAmelCase , return_attention_mask=__UpperCAmelCase , do_normalize=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): __SCREAMING_SNAKE_CASE = convert_classification(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) elif arch.endswith('''ForAudioFrameClassification''' ): __SCREAMING_SNAKE_CASE = convert_diarization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) elif arch.endswith('''ForXVector''' ): __SCREAMING_SNAKE_CASE = convert_xvector(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: __SCREAMING_SNAKE_CASE = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(__UpperCAmelCase ) hf_model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": 
lowerCAmelCase__ =argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") lowerCAmelCase__ =parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
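Once the conversion script above has run, the dumped artifacts load back through the standard API. A sketch, assuming the output directory passed as --model_dump_path and a sequence-classification architecture:

from transformers import UniSpeechSatForSequenceClassification, Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("./converted")  # path is a placeholder
model = UniSpeechSatForSequenceClassification.from_pretrained("./converted")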
713
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class A__( __magic_name__ ): lowerCAmelCase = '''van''' def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=2_24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : str=[64, 1_28, 3_20, 5_12] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE : Dict=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : Any=1E-2 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_sizes __SCREAMING_SNAKE_CASE = strides __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = mlp_ratios __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = layer_scale_init_value __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = dropout_rate
690
0
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase__ =TypeVar("KEY") lowerCAmelCase__ =TypeVar("VAL") @dataclass(frozen=__magic_name__ , slots=__magic_name__ ) class A__( Generic[KEY, VAL] ): lowerCAmelCase = 42 lowerCAmelCase = 42 class A__( _Item ): def __init__( self : Tuple ) -> None: """simple docstring""" super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __bool__( self : List[Any] ) -> bool: """simple docstring""" return False lowerCAmelCase__ =_DeletedItem() class A__( MutableMapping[KEY, VAL] ): def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : float = 0.75 ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = initial_block_size __SCREAMING_SNAKE_CASE = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __SCREAMING_SNAKE_CASE = capacity_factor __SCREAMING_SNAKE_CASE = 0 def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : KEY ) -> int: """simple docstring""" return hash(_UpperCAmelCase ) % len(self._buckets ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" return (ind + 1) % len(self._buckets ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : KEY , __SCREAMING_SNAKE_CASE : VAL ) -> bool: """simple docstring""" __SCREAMING_SNAKE_CASE = self._buckets[ind] if not stored: __SCREAMING_SNAKE_CASE = _Item(_UpperCAmelCase , _UpperCAmelCase ) self._len += 1 return True elif stored.key == key: __SCREAMING_SNAKE_CASE = _Item(_UpperCAmelCase , _UpperCAmelCase ) return True else: return False def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" __SCREAMING_SNAKE_CASE = len(self._buckets ) * self._capacity_factor return len(self ) >= int(_UpperCAmelCase ) def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" if len(self._buckets ) <= self._initial_block_size: return False __SCREAMING_SNAKE_CASE = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self._buckets __SCREAMING_SNAKE_CASE = [None] * new_size __SCREAMING_SNAKE_CASE = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _a ( self : Optional[int] ) -> None: """simple docstring""" self._resize(len(self._buckets ) * 2 ) def _a ( self : str ) -> None: """simple docstring""" self._resize(len(self._buckets ) // 2 ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : KEY ) -> Iterator[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self._get_bucket_index(_UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind __SCREAMING_SNAKE_CASE = self._get_next_ind(_UpperCAmelCase ) def _a ( self : int , __SCREAMING_SNAKE_CASE : KEY , __SCREAMING_SNAKE_CASE : VAL ) -> None: """simple docstring""" for ind in self._iterate_buckets(_UpperCAmelCase ): if self._try_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): break def __setitem__( self : List[str] , __SCREAMING_SNAKE_CASE : KEY , __SCREAMING_SNAKE_CASE : VAL ) -> None: """simple docstring""" if self._is_full(): self._size_up() self._add_item(_UpperCAmelCase , _UpperCAmelCase ) def __delitem__( self : Any , __SCREAMING_SNAKE_CASE : KEY ) -> None: """simple docstring""" for ind in self._iterate_buckets(_UpperCAmelCase ): __SCREAMING_SNAKE_CASE = self._buckets[ind] if item is None: raise KeyError(_UpperCAmelCase ) if item is _deleted: continue if item.key == 
key: __SCREAMING_SNAKE_CASE = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : int , __SCREAMING_SNAKE_CASE : KEY ) -> VAL: """simple docstring""" for ind in self._iterate_buckets(_UpperCAmelCase ): __SCREAMING_SNAKE_CASE = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(_UpperCAmelCase ) def __len__( self : Union[str, Any] ) -> int: """simple docstring""" return self._len def __iter__( self : Optional[Any] ) -> Iterator[KEY]: """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__( self : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ''' ,'''.join( f"""{item.key}: {item.val}""" for item in self._buckets if item ) return f"""HashMap({val_string})"""
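The class above implements open addressing with linear probing, but its mangled placeholder names (_UpperCAmelCase) make it unrunnable as printed. Here is a self-contained sketch of the same probing idea, independent of those names:

def probe_insert(buckets: list, key, val) -> None:
    # Probe forward from hash(key) % size until a free slot or the same key.
    size = len(buckets)
    ind = hash(key) % size
    for _ in range(size):
        if buckets[ind] is None or buckets[ind][0] == key:
            buckets[ind] = (key, val)
            return
        ind = (ind + 1) % size
    raise RuntimeError("table full")

table = [None] * 8
probe_insert(table, "a", 1)
probe_insert(table, "b", 2)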
714
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ ={"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def _a ( UpperCAmelCase__ ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = tokenizer(example['''content'''] , truncation=__A )["""input_ids"""] __SCREAMING_SNAKE_CASE = len(example['''content'''] ) / len(output['''input_ids'''] ) return output lowerCAmelCase__ =HfArgumentParser(PretokenizationArguments) lowerCAmelCase__ =parser.parse_args() if args.num_workers is None: lowerCAmelCase__ =multiprocessing.cpu_count() lowerCAmelCase__ =AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase__ =time.time() lowerCAmelCase__ =load_dataset(args.dataset_name, split="train") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') lowerCAmelCase__ =time.time() lowerCAmelCase__ =ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') lowerCAmelCase__ =time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__( a__ , unittest.TestCase ): lowerCAmelCase = KandinskyVaaControlnetPipeline lowerCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] lowerCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] lowerCAmelCase = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCAmelCase = False @property def _a ( self : List[str] ) -> Any: """simple docstring""" return 32 @property def _a ( self : int ) -> Union[str, Any]: """simple docstring""" return 32 @property def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" return self.time_input_dim @property def _a ( self : Tuple ) -> List[str]: """simple docstring""" return self.time_input_dim * 4 @property def _a ( self : List[Any] ) -> Tuple: """simple docstring""" return 1_00 @property def _a ( self : int ) -> int: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __SCREAMING_SNAKE_CASE = UNetaDConditionModel(**_A ) return model @property def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a ( self : Tuple ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_unet __SCREAMING_SNAKE_CASE = self.dummy_movq __SCREAMING_SNAKE_CASE = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_A , ) __SCREAMING_SNAKE_CASE = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def _a ( self : List[str] , __SCREAMING_SNAKE_CASE 
: List[Any] , __SCREAMING_SNAKE_CASE : Any=0 ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A ) __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _A ) # create hint __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(_A ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=_A ).manual_seed(_A ) __SCREAMING_SNAKE_CASE = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 'cpu' __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**_A ) __SCREAMING_SNAKE_CASE = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(_A ) ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : Union[str, Any] ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' ) __SCREAMING_SNAKE_CASE = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) __SCREAMING_SNAKE_CASE = torch.from_numpy(np.array(_A ) ).float() / 2_55.0 __SCREAMING_SNAKE_CASE = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) __SCREAMING_SNAKE_CASE = KandinskyVaaControlnetPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE = 'A robot, 4k photo' __SCREAMING_SNAKE_CASE = torch.Generator(device='''cuda''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __SCREAMING_SNAKE_CASE = torch.Generator(device='''cuda''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipeline( image_embeds=_A , negative_image_embeds=_A , hint=_A , generator=_A , num_inference_steps=1_00 
, output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(_A , _A )
716
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A__( unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = CLIPConfig() # Create a dummy config file with image_proceesor_type __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict() config_dict.pop('''image_processor_type''' ) __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # make sure private variable is not incorrectly saved __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) 
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> str: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def _a ( self : Dict ) -> Dict: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _a ( self : int ) -> Any: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _a ( self : int ) -> List[Any]: """simple docstring""" class A__( __magic_name__ ): lowerCAmelCase = True try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
690
0
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class A__( _A ): lowerCAmelCase = '''EncodecFeatureExtractor''' lowerCAmelCase = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]: """simple docstring""" super().__init__(__lowerCamelCase , __lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.feature_extractor __SCREAMING_SNAKE_CASE = False def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Dict=True ) -> Union[str, Any]: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__lowerCamelCase , language=__lowerCamelCase , no_timestamps=__lowerCamelCase ) def __call__( self : Dict , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCamelCase , **__lowerCamelCase ) __SCREAMING_SNAKE_CASE = kwargs.pop('''audio''' , __lowerCamelCase ) __SCREAMING_SNAKE_CASE = kwargs.pop('''sampling_rate''' , __lowerCamelCase ) __SCREAMING_SNAKE_CASE = kwargs.pop('''text''' , __lowerCamelCase ) if len(__lowerCamelCase ) > 0: __SCREAMING_SNAKE_CASE = args[0] __SCREAMING_SNAKE_CASE = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __SCREAMING_SNAKE_CASE = self.tokenizer(__lowerCamelCase , **__lowerCamelCase ) if audio is not None: __SCREAMING_SNAKE_CASE = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: __SCREAMING_SNAKE_CASE = audio_inputs["input_values"] if "padding_mask" in audio_inputs: __SCREAMING_SNAKE_CASE = audio_inputs["padding_mask"] return inputs def _a ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.pop('''audio''' , __lowerCamelCase ) __SCREAMING_SNAKE_CASE = kwargs.pop('''padding_mask''' , __lowerCamelCase ) if len(__lowerCamelCase ) > 0: __SCREAMING_SNAKE_CASE = args[0] __SCREAMING_SNAKE_CASE = args[1:] if audio_values is not None: return self._decode_audio(__lowerCamelCase , padding_mask=__lowerCamelCase ) else: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _a ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional = None ) -> List[np.ndarray]: """simple docstring""" __SCREAMING_SNAKE_CASE = to_numpy(__lowerCamelCase ) __SCREAMING_SNAKE_CASE = audio_values.shape if padding_mask is None: return list(__lowerCamelCase ) __SCREAMING_SNAKE_CASE = to_numpy(__lowerCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __SCREAMING_SNAKE_CASE = seq_len - padding_mask.shape[-1] __SCREAMING_SNAKE_CASE = 1 - self.feature_extractor.padding_value __SCREAMING_SNAKE_CASE 
= np.pad(__lowerCamelCase , ((0, 0), (0, difference)) , '''constant''' , constant_values=__lowerCamelCase ) __SCREAMING_SNAKE_CASE = audio_values.tolist() for i in range(__lowerCamelCase ): __SCREAMING_SNAKE_CASE = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __SCREAMING_SNAKE_CASE = sliced_audio.reshape(__lowerCamelCase , -1 ) return audio_values
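The padding-mask trimming in _decode_audio above keeps only samples whose mask value differs from the padding value. A small NumPy sketch of that step:

import numpy as np

padding_value = 0
audio = np.arange(8, dtype=np.float32).reshape(1, 8)  # one mono waveform
mask = np.array([1, 1, 1, 1, 1, 0, 0, 0])             # 0 marks padded samples
trimmed = audio[:, mask != padding_value]             # keeps the first 5 samples
print(trimmed.shape)  # (1, 5)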
717
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
690
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = SwinvaConfig() __SCREAMING_SNAKE_CASE = swinva_name.split('''_''' ) __SCREAMING_SNAKE_CASE = name_split[1] if "to" in name_split[3]: __SCREAMING_SNAKE_CASE = int(name_split[3][-3:] ) else: __SCREAMING_SNAKE_CASE = int(name_split[3] ) if "to" in name_split[2]: __SCREAMING_SNAKE_CASE = int(name_split[2][-2:] ) else: __SCREAMING_SNAKE_CASE = int(name_split[2][6:] ) if model_size == "tiny": __SCREAMING_SNAKE_CASE = 96 __SCREAMING_SNAKE_CASE = (2, 2, 6, 2) __SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "small": __SCREAMING_SNAKE_CASE = 96 __SCREAMING_SNAKE_CASE = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "base": __SCREAMING_SNAKE_CASE = 1_28 __SCREAMING_SNAKE_CASE = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE = (4, 8, 16, 32) else: __SCREAMING_SNAKE_CASE = 1_92 __SCREAMING_SNAKE_CASE = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE = (6, 12, 24, 48) if "to" in swinva_name: __SCREAMING_SNAKE_CASE = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __SCREAMING_SNAKE_CASE = 2_18_41 __SCREAMING_SNAKE_CASE = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE = '''imagenet-22k-id2label.json''' __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE = {int(a_ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} else: __SCREAMING_SNAKE_CASE = 10_00 __SCREAMING_SNAKE_CASE = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE = {int(a_ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = img_size __SCREAMING_SNAKE_CASE = num_classes __SCREAMING_SNAKE_CASE = embed_dim __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = num_heads __SCREAMING_SNAKE_CASE = window_size return config def _a ( UpperCAmelCase__ ) -> Tuple: if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __SCREAMING_SNAKE_CASE = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: __SCREAMING_SNAKE_CASE = '''encoder.''' + name if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: __SCREAMING_SNAKE_CASE = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: __SCREAMING_SNAKE_CASE = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: 
__SCREAMING_SNAKE_CASE = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: __SCREAMING_SNAKE_CASE = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": __SCREAMING_SNAKE_CASE = '''layernorm.weight''' if name == "norm.bias": __SCREAMING_SNAKE_CASE = '''layernorm.bias''' if "head" in name: __SCREAMING_SNAKE_CASE = name.replace('''head''' , '''classifier''' ) else: __SCREAMING_SNAKE_CASE = '''swinv2.''' + name return name def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(a_ ) if "mask" in key: continue elif "qkv" in key: __SCREAMING_SNAKE_CASE = key.split('''.''' ) __SCREAMING_SNAKE_CASE = int(key_split[1] ) __SCREAMING_SNAKE_CASE = int(key_split[3] ) __SCREAMING_SNAKE_CASE = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val[:dim] __SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE = val[-dim:] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> int: __SCREAMING_SNAKE_CASE = timm.create_model(a_ , pretrained=a_ ) timm_model.eval() __SCREAMING_SNAKE_CASE = get_swinva_config(a_ ) __SCREAMING_SNAKE_CASE = SwinvaForImageClassification(a_ ) model.eval() __SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , a_ ) model.load_state_dict(a_ ) __SCREAMING_SNAKE_CASE = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) __SCREAMING_SNAKE_CASE = Image.open(requests.get(a_ , stream=a_ ).raw ) __SCREAMING_SNAKE_CASE = image_processor(images=a_ , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE = timm_model(inputs['''pixel_values'''] ) __SCREAMING_SNAKE_CASE = model(**a_ ).logits assert torch.allclose(a_ , a_ , atol=1E-3 ) print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a_ ) model.push_to_hub( repo_path_or_name=Path(a_ , a_ ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--swinv2_name", default="swinv2_tiny_patch4_window8_256", type=str, help="Name of the Swinv2 timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase__ =parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
718
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
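End to end, the pipeline class above is driven like this (a sketch; the CLIP checkpoint and image path are stand-ins):

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
print(preds[0]["label"], round(preds[0]["score"], 3))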
690
0
"""simple docstring""" lowerCAmelCase__ ={} def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int: if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on __SCREAMING_SNAKE_CASE = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one __SCREAMING_SNAKE_CASE = _calculate(days - 1 , _A , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 __SCREAMING_SNAKE_CASE = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter __SCREAMING_SNAKE_CASE = _calculate(days - 1 , _A , 0 ) __SCREAMING_SNAKE_CASE = state_late + state_absent + state_ontime __SCREAMING_SNAKE_CASE = prizestrings return prizestrings def _a ( UpperCAmelCase__ = 30 ) -> int: return _calculate(_A , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
719
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" lowerCAmelCase__ ={ 0: """0""", 1: """1""", 2: """2""", 3: """3""", 4: """4""", 5: """5""", 6: """6""", 7: """7""", 8: """8""", 9: """9""", 10: """a""", 11: """b""", 12: """c""", 13: """d""", 14: """e""", 15: """f""", } def _a ( UpperCAmelCase__ ) -> str: assert type(UpperCAmelCase__ ) in (int, float) and decimal == int(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = int(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = '''''' __SCREAMING_SNAKE_CASE = False if decimal < 0: __SCREAMING_SNAKE_CASE = True decimal *= -1 while decimal > 0: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = divmod(UpperCAmelCase__ , 16 ) __SCREAMING_SNAKE_CASE = values[remainder] + hexadecimal __SCREAMING_SNAKE_CASE = '''0x''' + hexadecimal if negative: __SCREAMING_SNAKE_CASE = '''-''' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
720
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
690
0
"""simple docstring""" from __future__ import annotations def _a ( UpperCAmelCase__ ) -> Union[str, Any]: for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(__UpperCamelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(__UpperCamelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
721
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
690
0
"""simple docstring""" import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A__: @staticmethod def _a ( *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Tuple ) -> str: """simple docstring""" pass @is_pipeline_test @require_vision class A__( unittest.TestCase ): @require_torch def _a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __SCREAMING_SNAKE_CASE = image_classifier(A_ , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(A_ ) , [ [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}], [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}], ] , ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], ] , ) @require_tf def _a ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __SCREAMING_SNAKE_CASE = image_classifier(A_ , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(A_ ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, 
{'''score''': 0.3_33, '''label''': ANY(A_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, {'''score''': 0.3_33, '''label''': ANY(A_ )}, ], ] , ) @slow @require_torch def _a ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __SCREAMING_SNAKE_CASE = image_classifier(A_ , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(A_ ) , [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ] , ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def _a ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __SCREAMING_SNAKE_CASE = image_classifier(A_ , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(A_ ) , [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ] , ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ], ] * 5 , )
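All of the tests above funnel through the same public entry point. A minimal, hedged sketch of that call pattern (the checkpoint name is taken from the slow tests above; it needs torch, Pillow, and network access to run):

# standalone sketch of what the slow tests exercise
from transformers import pipeline
from PIL import Image

classifier = pipeline(task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' )
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
print(classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] ))
# -> a list of {"score": ..., "label": ...} dicts, highest score first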
700
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _a ( UpperCAmelCase__ ) -> dict[str, str]: __SCREAMING_SNAKE_CASE = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __SCREAMING_SNAKE_CASE = remove_duplicates(key.upper() ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # First fill cipher with key characters __SCREAMING_SNAKE_CASE = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 26 ): __SCREAMING_SNAKE_CASE = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __SCREAMING_SNAKE_CASE = alphabet[i - offset] __SCREAMING_SNAKE_CASE = char return cipher_alphabet def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = input('''Enter message to encode or decode: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Enter keyword: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __SCREAMING_SNAKE_CASE = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __SCREAMING_SNAKE_CASE = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
690
0
"""simple docstring""" import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class A__( lowercase__ , unittest.TestCase ): lowerCAmelCase = WavaVecaPhonemeCTCTokenizer lowerCAmelCase = False def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) __SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __SCREAMING_SNAKE_CASE = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) def _a ( self : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : int=20 , __SCREAMING_SNAKE_CASE : int=5 ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )) for i in range(len(__SCREAMING_SNAKE_CASE ) )] __SCREAMING_SNAKE_CASE = list(filter(lambda __SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ) if max_length is not None and len(__SCREAMING_SNAKE_CASE ) > max_length: __SCREAMING_SNAKE_CASE = toks[:max_length] if min_length is not None and len(__SCREAMING_SNAKE_CASE ) < min_length and len(__SCREAMING_SNAKE_CASE ) > 0: while len(__SCREAMING_SNAKE_CASE ) < min_length: __SCREAMING_SNAKE_CASE = toks + toks # toks_str = [t[1] for t in toks] __SCREAMING_SNAKE_CASE = [t[0] for t in toks] # Ensure consistency __SCREAMING_SNAKE_CASE = tokenizer.decode(__SCREAMING_SNAKE_CASE , 
clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) if " " not in output_txt and len(__SCREAMING_SNAKE_CASE ) > 1: __SCREAMING_SNAKE_CASE = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) ) if with_prefix_space: __SCREAMING_SNAKE_CASE = ''' ''' + output_txt __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) return output_txt, output_ids def _a ( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _a ( self : Dict ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) __SCREAMING_SNAKE_CASE = tokenizer('''m xxx ɪ''' , do_phonemize=__SCREAMING_SNAKE_CASE ).input_ids self.assertEqual(__SCREAMING_SNAKE_CASE , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) __SCREAMING_SNAKE_CASE = tokenizer('''m aaa ɪ ccc''' , do_phonemize=__SCREAMING_SNAKE_CASE ).input_ids self.assertEqual(__SCREAMING_SNAKE_CASE , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa __SCREAMING_SNAKE_CASE = tokenizer('''maɪ c''' , do_phonemize=__SCREAMING_SNAKE_CASE ).input_ids self.assertEqual(__SCREAMING_SNAKE_CASE , [3, 2_00] ) # mai should be <unk> (=3) def _a ( self : Tuple ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(__SCREAMING_SNAKE_CASE ).input_ids , tokenizer(__SCREAMING_SNAKE_CASE , do_phonemize=__SCREAMING_SNAKE_CASE ).input_ids ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) __SCREAMING_SNAKE_CASE = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE ).input_ids ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) __SCREAMING_SNAKE_CASE = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] __SCREAMING_SNAKE_CASE = tokenizer.decode(sample_ids[0] ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0] ) self.assertEqual(__SCREAMING_SNAKE_CASE , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _a ( self : int ) -> Tuple: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(__SCREAMING_SNAKE_CASE ).input_ids , tokenizer(__SCREAMING_SNAKE_CASE , do_phonemize=__SCREAMING_SNAKE_CASE ).input_ids ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off __SCREAMING_SNAKE_CASE = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter __SCREAMING_SNAKE_CASE = tokenizer.decode(sample_ids[0] ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0] ) self.assertEqual(__SCREAMING_SNAKE_CASE , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter __SCREAMING_SNAKE_CASE = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0] ) self.assertEqual(__SCREAMING_SNAKE_CASE , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _a ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) __SCREAMING_SNAKE_CASE = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE ).input_ids , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ) __SCREAMING_SNAKE_CASE = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE ).input_ids , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , __SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 
self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = '''Hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE , phonemizer_lang='''en-us''' ).input_ids __SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.decode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.decode(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , '''ɛ l o h aʊ a ʁ j u''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) __SCREAMING_SNAKE_CASE = '''Hello how Are you''' __SCREAMING_SNAKE_CASE = '''hello how are you''' __SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE ).input_ids self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off __SCREAMING_SNAKE_CASE = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , ['''k s ɾ ɾ l ɭʲ!?!? 
$$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [d[key] for d in offsets] return retrieved_list def _a ( self : str ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" __SCREAMING_SNAKE_CASE = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on __SCREAMING_SNAKE_CASE = tokenizer.decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ): self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertTrue(isinstance(outputs_list[0] , __SCREAMING_SNAKE_CASE ) ) # transform list to ModelOutput __SCREAMING_SNAKE_CASE = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): [recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for la, la in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off __SCREAMING_SNAKE_CASE = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [tokenizer.decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE ) for ids in sample_ids] check_list_tuples_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _a ( self : List[Any] ) -> str: """simple docstring""" pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _a ( self : Tuple ) -> List[str]: """simple docstring""" pass def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = tokenizer.vocab_size __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __SCREAMING_SNAKE_CASE = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] __SCREAMING_SNAKE_CASE = tokenizer.add_tokens(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.vocab_size __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) self.assertEqual(__SCREAMING_SNAKE_CASE , all_size + len(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __SCREAMING_SNAKE_CASE = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} __SCREAMING_SNAKE_CASE = tokenizer.add_special_tokens(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.vocab_size __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) self.assertEqual(__SCREAMING_SNAKE_CASE , all_size_a + len(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) 
self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _a ( self : List[str] ) -> Any: """simple docstring""" pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(output['''text'''] , __SCREAMING_SNAKE_CASE )
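For orientation, a hedged sketch of the round trip these tests assert; the expected output string is taken from the assertions above, not re-verified here, and running it requires the `phonemizer` backend plus network access to the public checkpoint:

tokenizer = WavaVecaPhonemeCTCTokenizer.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
ids = tokenizer('''Hello how are you''' ).input_ids  # text is phonemized, then mapped to ids
print(tokenizer.decode(ids ))  # -> '''h ə l oʊ h aʊ ɑːɹ j uː'''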
701
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__: def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[Any] ) -> Any: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _a ( self : Dict ) -> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]={} ): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = TFRegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def 
_a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : List[Any] ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
690
0
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCAmelCase__ =NewType("DataClass", Any) lowerCAmelCase__ =NewType("DataClassType", Any) def _a ( UpperCAmelCase__ ) -> int: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def _a ( UpperCAmelCase__ ) -> List[Any]: __SCREAMING_SNAKE_CASE = {str(UpperCAmelCase__ ): choice for choice in choices} return lambda UpperCAmelCase__ : str_to_choice.get(UpperCAmelCase__ , UpperCAmelCase__ ) def _a ( *, UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = dataclasses.MISSING , UpperCAmelCase__ = dataclasses.MISSING , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> Tuple: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __SCREAMING_SNAKE_CASE = {} if aliases is not None: __SCREAMING_SNAKE_CASE = aliases if help is not None: __SCREAMING_SNAKE_CASE = help return dataclasses.field(metadata=UpperCAmelCase__ , default=UpperCAmelCase__ , default_factory=UpperCAmelCase__ , **UpperCAmelCase__ ) class A__( __magic_name__ ): lowerCAmelCase = 42 def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" if "formatter_class" not in kwargs: __SCREAMING_SNAKE_CASE = ArgumentDefaultsHelpFormatter super().__init__(**__SCREAMING_SNAKE_CASE ) if dataclasses.is_dataclass(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [dataclass_types] __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = f"""--{field.name}""" __SCREAMING_SNAKE_CASE = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __SCREAMING_SNAKE_CASE ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) __SCREAMING_SNAKE_CASE = kwargs.pop('''aliases''' , [] ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [aliases] __SCREAMING_SNAKE_CASE = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__SCREAMING_SNAKE_CASE , '''UnionType''' ) and isinstance(__SCREAMING_SNAKE_CASE , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__SCREAMING_SNAKE_CASE ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field \'{field.name}\'.""" ) if type(__SCREAMING_SNAKE_CASE ) not in field.type.__args__: # filter `str` in Union __SCREAMING_SNAKE_CASE = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __SCREAMING_SNAKE_CASE = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __SCREAMING_SNAKE_CASE = ( field.type.__args__[0] if isinstance(__SCREAMING_SNAKE_CASE , field.type.__args__[1] ) else field.type.__args__[1] ) __SCREAMING_SNAKE_CASE = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __SCREAMING_SNAKE_CASE = {} if origin_type is Literal or (isinstance(field.type , __SCREAMING_SNAKE_CASE ) and issubclass(field.type , __SCREAMING_SNAKE_CASE )): if origin_type is Literal: __SCREAMING_SNAKE_CASE = field.type.__args__ else: __SCREAMING_SNAKE_CASE = [x.value for x in field.type] __SCREAMING_SNAKE_CASE = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: __SCREAMING_SNAKE_CASE = field.default else: __SCREAMING_SNAKE_CASE = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __SCREAMING_SNAKE_CASE = copy(__SCREAMING_SNAKE_CASE ) # Hack because type=bool in argparse does not behave as we want. __SCREAMING_SNAKE_CASE = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
__SCREAMING_SNAKE_CASE = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __SCREAMING_SNAKE_CASE = default # This tells argparse we accept 0 or 1 value after --field_name __SCREAMING_SNAKE_CASE = '''?''' # This is the value that will get picked if we do --field_name (without value) __SCREAMING_SNAKE_CASE = True elif isclass(__SCREAMING_SNAKE_CASE ) and issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = field.type.__args__[0] __SCREAMING_SNAKE_CASE = '''+''' if field.default_factory is not dataclasses.MISSING: __SCREAMING_SNAKE_CASE = field.default_factory() elif field.default is dataclasses.MISSING: __SCREAMING_SNAKE_CASE = True else: __SCREAMING_SNAKE_CASE = field.type if field.default is not dataclasses.MISSING: __SCREAMING_SNAKE_CASE = field.default elif field.default_factory is not dataclasses.MISSING: __SCREAMING_SNAKE_CASE = field.default_factory() else: __SCREAMING_SNAKE_CASE = True parser.add_argument(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __SCREAMING_SNAKE_CASE = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__SCREAMING_SNAKE_CASE ) def _a ( self : str , __SCREAMING_SNAKE_CASE : str ) -> Tuple: """simple docstring""" if hasattr(__SCREAMING_SNAKE_CASE , '''_argument_group_name''' ): __SCREAMING_SNAKE_CASE = self.add_argument_group(dtype._argument_group_name ) else: __SCREAMING_SNAKE_CASE = self try: __SCREAMING_SNAKE_CASE = get_type_hints(__SCREAMING_SNAKE_CASE ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = '''.'''.join(map(__SCREAMING_SNAKE_CASE , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__SCREAMING_SNAKE_CASE ): if not field.init: continue __SCREAMING_SNAKE_CASE = type_hints[field.name] self._parse_dataclass_field(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None , ) -> Tuple[DataClass, ...]: """simple docstring""" if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __SCREAMING_SNAKE_CASE = [] if args_filename: args_files.append(Path(__SCREAMING_SNAKE_CASE ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __SCREAMING_SNAKE_CASE = ArgumentParser() args_file_parser.add_argument(__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = args_file_parser.parse_known_args(args=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vars(__SCREAMING_SNAKE_CASE ).get(args_file_flag.lstrip('''-''' ) , __SCREAMING_SNAKE_CASE ) if cmd_args_file_paths: args_files.extend([Path(__SCREAMING_SNAKE_CASE ) for p in cmd_args_file_paths] ) __SCREAMING_SNAKE_CASE = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __SCREAMING_SNAKE_CASE = file_args + args if args is not None else file_args + sys.argv[1:] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.parse_known_args(args=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [] for dtype in self.dataclass_types: __SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(__SCREAMING_SNAKE_CASE ) if f.init} __SCREAMING_SNAKE_CASE = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k in keys} for k in keys: delattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = dtype(**__SCREAMING_SNAKE_CASE ) outputs.append(__SCREAMING_SNAKE_CASE ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__SCREAMING_SNAKE_CASE ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] = False ) -> Tuple[DataClass, ...]: """simple docstring""" __SCREAMING_SNAKE_CASE = set(args.keys() ) __SCREAMING_SNAKE_CASE = [] for dtype in self.dataclass_types: __SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(__SCREAMING_SNAKE_CASE ) if f.init} __SCREAMING_SNAKE_CASE = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __SCREAMING_SNAKE_CASE = dtype(**__SCREAMING_SNAKE_CASE ) outputs.append(__SCREAMING_SNAKE_CASE ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__SCREAMING_SNAKE_CASE )}""" ) return tuple(__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] = False ) -> Tuple[DataClass, ...]: """simple docstring""" with open(Path(__SCREAMING_SNAKE_CASE ) , encoding='''utf-8''' ) as open_json_file: __SCREAMING_SNAKE_CASE = json.loads(open_json_file.read() ) __SCREAMING_SNAKE_CASE = self.parse_dict(__SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE ) def _a ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] = False ) -> Tuple[DataClass, ...]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.parse_dict(yaml.safe_load(Path(__SCREAMING_SNAKE_CASE ).read_text() ) , allow_extra_keys=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE )
702
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = XLMRobertaTokenizer lowerCAmelCase = XLMRobertaTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<pad>''' __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_02 ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', 
'''<unk>''', '''.''', ] , ) def _a ( self : int ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) @cached_property def _a ( self : Union[str, Any] ) -> List[str]: """simple 
docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE ) pickle.loads(__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.''' __SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''Hello World!''' __SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __SCREAMING_SNAKE_CASE = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
690
0
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase__ =abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _a ( UpperCAmelCase__ ) -> Dict: from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def _a ( UpperCAmelCase__ ) -> str: from diffusers.utils.testing_utils import pytest_terminal_summary_main __SCREAMING_SNAKE_CASE = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
"""simple docstring""" from random import shuffle import tensorflow as tf from numpy import array def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = int(_lowerCamelCase ) assert noofclusters < len(_lowerCamelCase ) # Find out the dimensionality __SCREAMING_SNAKE_CASE = len(vectors[0] ) # Will help select random centroids from among the available vectors __SCREAMING_SNAKE_CASE = list(range(len(_lowerCamelCase ) ) ) shuffle(_lowerCamelCase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. __SCREAMING_SNAKE_CASE = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION __SCREAMING_SNAKE_CASE = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points __SCREAMING_SNAKE_CASE = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase ) ] ##These nodes will assign the centroid Variables the appropriate ##values __SCREAMING_SNAKE_CASE = tf.placeholder('''float64''' , [dim] ) __SCREAMING_SNAKE_CASE = [] for centroid in centroids: cent_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) __SCREAMING_SNAKE_CASE = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )] ##These nodes will assign an assignment Variable the appropriate ##value __SCREAMING_SNAKE_CASE = tf.placeholder('''int32''' ) __SCREAMING_SNAKE_CASE = [] for assignment in assignments: cluster_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input __SCREAMING_SNAKE_CASE = tf.placeholder('''float''' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors __SCREAMING_SNAKE_CASE = tf.reduce_mean(_lowerCamelCase , 0 ) ##Node for computing Euclidean distances # Placeholders for input __SCREAMING_SNAKE_CASE = tf.placeholder('''float''' , [dim] ) __SCREAMING_SNAKE_CASE = tf.placeholder('''float''' , [dim] ) __SCREAMING_SNAKE_CASE = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase , _lowerCamelCase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input __SCREAMING_SNAKE_CASE = tf.placeholder('''float''' , [noofclusters] ) __SCREAMING_SNAKE_CASE = tf.argmin(_lowerCamelCase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. __SCREAMING_SNAKE_CASE = tf.initialize_all_variables() # Initialize all variables sess.run(_lowerCamelCase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. __SCREAMING_SNAKE_CASE = 1_00 for _ in range(_lowerCamelCase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. 
# Iterate over each vector for vector_n in range(len(_lowerCamelCase ) ): __SCREAMING_SNAKE_CASE = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. __SCREAMING_SNAKE_CASE = [ sess.run(_lowerCamelCase , feed_dict={va: vect, va: sess.run(_lowerCamelCase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input __SCREAMING_SNAKE_CASE = sess.run( _lowerCamelCase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(_lowerCamelCase ): # Collect all the vectors assigned to this cluster __SCREAMING_SNAKE_CASE = [ vectors[i] for i in range(len(_lowerCamelCase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location __SCREAMING_SNAKE_CASE = sess.run( _lowerCamelCase , feed_dict={mean_input: array(_lowerCamelCase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments __SCREAMING_SNAKE_CASE = sess.run(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = sess.run(_lowerCamelCase ) return centroids, assignments
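A toy usage sketch for the clustering routine above. The function name comes from the reconstruction, and a TF 1.x-era runtime is assumed, since the script relies on tf.Session, tf.placeholder, tf.sub and tf.initialize_all_variables.

from numpy import array

points = array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.9, 8.2]])
centroids, assignments = tf_k_means_cluster(points, 2)
print(centroids)    # two centroid locations, one near each blob of points
print(assignments)  # a cluster index (0 or 1) for every input vector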
704
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) if "model" in sd.keys(): __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model'''] # pop unnecessary weights __SCREAMING_SNAKE_CASE = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __SCREAMING_SNAKE_CASE = sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __SCREAMING_SNAKE_CASE = sd[key] # We split QKV in separate Q,K,V __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) __SCREAMING_SNAKE_CASE = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(UpperCAmelCase__ , depth // 3 , dim=0 ) __SCREAMING_SNAKE_CASE = q __SCREAMING_SNAKE_CASE = k __SCREAMING_SNAKE_CASE = v del sd[key] return sd @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_checkpoint(UpperCAmelCase__ ) if config is not None: __SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = OPTConfig() __SCREAMING_SNAKE_CASE = OPTModel(UpperCAmelCase__ ).half().eval() model.load_state_dict(UpperCAmelCase__ ) # Check results Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") lowerCAmelCase__ =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
690
0
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) __SCREAMING_SNAKE_CASE = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) sd_pipe.set_scheduler('''sample_euler''' ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=_a , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __SCREAMING_SNAKE_CASE = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) sd_pipe.set_scheduler('''sample_euler''' ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=_a , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __SCREAMING_SNAKE_CASE = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sd_pipe( [prompt] , generator=_a , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_a , ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
705
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__( __magic_name__ ): lowerCAmelCase = '''naver-clova-ix/donut-base-finetuned-docvqa''' lowerCAmelCase = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) lowerCAmelCase = '''document_qa''' lowerCAmelCase = AutoProcessor lowerCAmelCase = VisionEncoderDecoderModel lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''text'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : "Image" , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __SCREAMING_SNAKE_CASE = task_prompt.replace('''{user_input}''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE = self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__SCREAMING_SNAKE_CASE , ).sequences def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __SCREAMING_SNAKE_CASE = re.sub(r'''<.*?>''' , '''''' , __SCREAMING_SNAKE_CASE , count=1 ).strip() 
# remove first task start token __SCREAMING_SNAKE_CASE = self.pre_processor.tokenajson(__SCREAMING_SNAKE_CASE ) return sequence["answer"]
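A minimal usage sketch for the document-QA tool above. The class name is an assumption taken from the error string in its own __init__ (DocumentQuestionAnsweringTool), and the image path is a placeholder; PipelineTool instances are callable, which runs encode, forward, then decode.

from PIL import Image

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")  # placeholder path to a document image
print(tool(document, "What is the total amount?"))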
690
0
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = VideoMAEConfig() set_architecture_configs(snake_case__ , snake_case__ ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = False if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = """huggingface/label-files""" if "kinetics" in model_name: __SCREAMING_SNAKE_CASE = 4_00 __SCREAMING_SNAKE_CASE = """kinetics400-id2label.json""" elif "ssv2" in model_name: __SCREAMING_SNAKE_CASE = 1_74 __SCREAMING_SNAKE_CASE = """something-something-v2-id2label.json""" else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE = {int(snake_case__ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: if "small" in model_name: __SCREAMING_SNAKE_CASE = 3_84 __SCREAMING_SNAKE_CASE = 15_36 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 1_92 __SCREAMING_SNAKE_CASE = 7_68 elif "large" in model_name: __SCREAMING_SNAKE_CASE = 10_24 __SCREAMING_SNAKE_CASE = 40_96 __SCREAMING_SNAKE_CASE = 24 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 5_12 __SCREAMING_SNAKE_CASE = 20_48 elif "huge" in model_name: __SCREAMING_SNAKE_CASE = 12_80 __SCREAMING_SNAKE_CASE = 51_20 __SCREAMING_SNAKE_CASE = 32 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 6_40 __SCREAMING_SNAKE_CASE = 25_60 elif "base" not in model_name: raise ValueError('''Model name should include either \"small\", \"base\", \"large\", or \"huge\"''' ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> Tuple: if "encoder." 
in name: __SCREAMING_SNAKE_CASE = name.replace('''encoder.''' , '''''' ) if "cls_token" in name: __SCREAMING_SNAKE_CASE = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: __SCREAMING_SNAKE_CASE = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __SCREAMING_SNAKE_CASE = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: __SCREAMING_SNAKE_CASE = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: __SCREAMING_SNAKE_CASE = name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: __SCREAMING_SNAKE_CASE = name.replace('''attn''' , '''attention.self''' ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: __SCREAMING_SNAKE_CASE = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: __SCREAMING_SNAKE_CASE = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: __SCREAMING_SNAKE_CASE = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace('''head''' , '''classifier''' ) return name def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ ) -> int: for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(snake_case__ ) if key.startswith('''encoder.''' ): __SCREAMING_SNAKE_CASE = key.replace('''encoder.''' , '''''' ) if "qkv" in key: __SCREAMING_SNAKE_CASE = key.split('''.''' ) if key.startswith('''decoder.blocks''' ): __SCREAMING_SNAKE_CASE = config.decoder_hidden_size __SCREAMING_SNAKE_CASE = int(key_split[2] ) __SCREAMING_SNAKE_CASE = """decoder.decoder_layers.""" if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = config.hidden_size __SCREAMING_SNAKE_CASE = int(key_split[1] ) __SCREAMING_SNAKE_CASE = """videomae.encoder.layer.""" if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def SCREAMING_SNAKE_CASE ( ) -> 
List[str]: __SCREAMING_SNAKE_CASE = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) __SCREAMING_SNAKE_CASE = np.load(snake_case__ ) return list(snake_case__ ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]: __SCREAMING_SNAKE_CASE = get_videomae_config(snake_case__ ) if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(snake_case__ ) else: __SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(snake_case__ ) # download original checkpoint, hosted on Google Drive __SCREAMING_SNAKE_CASE = """pytorch_model.bin""" gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ ) __SCREAMING_SNAKE_CASE = torch.load(snake_case__ , map_location='''cpu''' ) if "model" in files: __SCREAMING_SNAKE_CASE = files["""model"""] else: __SCREAMING_SNAKE_CASE = files["""module"""] __SCREAMING_SNAKE_CASE = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() # verify model on basic input __SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __SCREAMING_SNAKE_CASE = prepare_video() __SCREAMING_SNAKE_CASE = image_processor(snake_case__ , return_tensors='''pt''' ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) __SCREAMING_SNAKE_CASE = torch.load(snake_case__ ) __SCREAMING_SNAKE_CASE = model(**snake_case__ ) __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = [ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 4_00] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1_74] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": __SCREAMING_SNAKE_CASE = torch.Size([1, 14_08, 15_36] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = torch.Size([1, 14_08, 15_36] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one __SCREAMING_SNAKE_CASE = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": __SCREAMING_SNAKE_CASE = torch.Size([1, 14_08, 15_36] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 
0.5605]] ) elif model_name == "videomae-large-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 4_00] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == "videomae-huge-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 4_00] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 4_00] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 4_00] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 14_08, 15_36] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1_74] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 14_08, 15_36] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1_74] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(f"""Model name not supported. Should be one of {model_names}""" ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , snake_case__ , atol=1E-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = outputs.loss assert torch.allclose(snake_case__ , snake_case__ , atol=1E-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(snake_case__ ) model.save_pretrained(snake_case__ ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(snake_case__ , organization='''nielsr''' ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase__ =parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
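A hedged sketch of an equivalent direct call to the converter above; the function name comes from the script's own __main__ block, while the checkpoint URL and output path are placeholders.

convert_videomae_checkpoint(
    "https://drive.google.com/uc?id=<checkpoint-id>",  # checkpoint_url (Google Drive, placeholder)
    "./videomae-base",                                 # pytorch_dump_folder_path (placeholder)
    "videomae-base",                                   # model_name
    False,                                             # push_to_hub
)
# The converter downloads the Google Drive checkpoint, renames keys into the HF
# VideoMAE layout, verifies logits on a sample video, then saves the model.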
706
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__( unittest.TestCase ): @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _a ( self : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A__( unittest.TestCase ): def _a ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''google/ncsnpp-celebahq-256''' __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __SCREAMING_SNAKE_CASE = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
690
0
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class A__( __magic_name__ ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : Tuple = False , __SCREAMING_SNAKE_CASE : Union[str, Any] = False , __SCREAMING_SNAKE_CASE : Tuple = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Dict: """simple docstring""" super().__init__( __lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , ) __SCREAMING_SNAKE_CASE = path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths} __SCREAMING_SNAKE_CASE = Text( cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , **__lowerCAmelCase , ) def _a ( self : Optional[Any] ) -> int: """simple docstring""" if self.streaming: __SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , ) __SCREAMING_SNAKE_CASE = self.builder.as_dataset( split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory ) return dataset
707
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "spiece.model"} lowerCAmelCase__ ={ "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } lowerCAmelCase__ ={ "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __SCREAMING_SNAKE_CASE = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __SCREAMING_SNAKE_CASE = '''<|endoftext|>''' if eos_token is None else eos_token __SCREAMING_SNAKE_CASE = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token else: __SCREAMING_SNAKE_CASE = '''<pad>''' if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace 
normalization in input texts # fmt : off __SCREAMING_SNAKE_CASE = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __SCREAMING_SNAKE_CASE = re.compile( f"""[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" ) def __getstate__( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.sp_model ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub('''''' , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __SCREAMING_SNAKE_CASE = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __SCREAMING_SNAKE_CASE = unicodedata.normalize('''NFC''' , __SCREAMING_SNAKE_CASE ) return text def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" return out_string def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = '''''' __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Union[str, Any] ) -> Dict[str, int]: """simple docstring""" __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: """simple docstring""" return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __SCREAMING_SNAKE_CASE = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__SCREAMING_SNAKE_CASE ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=__SCREAMING_SNAKE_CASE )
690
0
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
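A hedged usage sketch that packs a directory without going through argparse; it assumes a {train,val,test}.{source,target} layout, and the paths are placeholders. Packing concatenates consecutive (source, target) pairs until the tokenized length would exceed max_tokens, which shrinks the number of training rows.

from pathlib import Path
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
pack_data_dir(tok, Path("./cnn_dm"), max_tokens=128, save_path="./cnn_dm_packed")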
708
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase__ ={"UserAgent": UserAgent().random} def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = script.contents[0] __SCREAMING_SNAKE_CASE = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A__: def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = f"""https://www.instagram.com/{username}/""" __SCREAMING_SNAKE_CASE = self.get_json() def _a ( self : List[Any] ) -> dict: """simple docstring""" __SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__SCREAMING_SNAKE_CASE ).text __SCREAMING_SNAKE_CASE = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Tuple ) -> str: """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def _a ( self : Tuple ) -> str: """simple docstring""" return self.user_data["username"] @property def _a ( self : List[Any] ) -> str: """simple docstring""" return self.user_data["full_name"] @property def _a ( self : Optional[Any] ) -> str: """simple docstring""" return self.user_data["biography"] @property def _a ( self : List[str] ) -> str: """simple docstring""" return self.user_data["business_email"] @property def _a ( self : Any ) -> str: """simple docstring""" return self.user_data["external_url"] @property def _a ( self : Any ) -> int: """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def _a ( self : Dict ) -> int: """simple docstring""" return self.user_data["edge_follow"]["count"] @property def _a ( self : str ) -> int: """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _a ( self : Union[str, Any] ) -> str: """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def _a ( self : Tuple ) -> bool: """simple docstring""" return self.user_data["is_verified"] @property def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.user_data["is_private"] def _a ( UpperCAmelCase__ = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __SCREAMING_SNAKE_CASE = InstagramUser(UpperCAmelCase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , UpperCAmelCase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ =InstagramUser("github") print(instagram_user) print(F'''{instagram_user.number_of_posts = }''') print(F'''{instagram_user.number_of_followers = }''') print(F'''{instagram_user.number_of_followings = }''') print(F'''{instagram_user.email = }''') print(F'''{instagram_user.website = }''') print(F'''{instagram_user.profile_picture_url = }''') print(F'''{instagram_user.is_verified = }''') print(F'''{instagram_user.is_private = }''')
690
0
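For reference, a readable sketch of the greedy packing routine in the code field above; the descriptive names (pack_examples, tok, src_examples, max_tokens) are reconstructed from the use sites in the obfuscated blob, so treat them as assumptions rather than the original identifiers.

from transformers import AutoTokenizer

def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    # Greedily concatenate consecutive (src, tgt) pairs until adding one more
    # example would push either side past the max_tokens budget.
    finished_src, finished_tgt = [], []
    examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in examples[1:]:
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):
            # Can't fit: finalize the current pair and start a new one.
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:
            # Fits: keep accumulating.
            new_src, new_tgt = cand_src, cand_tgt
    if new_src:  # flush the last, partially filled pair
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt

# Usage sketch (the model name is only an example):
# tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
# src, tgt = pack_examples(tok, ["a b", "c d"], ["x", "y"], max_tokens=8)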
"""simple docstring""" import math def _a ( UpperCAmelCase__ ) -> list[int]: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = int(math.sqrt(_lowercase ) ) # Size of every segment __SCREAMING_SNAKE_CASE = [True] * (end + 1) __SCREAMING_SNAKE_CASE = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): __SCREAMING_SNAKE_CASE = False start += 1 prime += in_prime __SCREAMING_SNAKE_CASE = end + 1 __SCREAMING_SNAKE_CASE = min(2 * end , _lowercase ) while low <= n: __SCREAMING_SNAKE_CASE = [True] * (high - low + 1) for each in in_prime: __SCREAMING_SNAKE_CASE = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): __SCREAMING_SNAKE_CASE = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) __SCREAMING_SNAKE_CASE = high + 1 __SCREAMING_SNAKE_CASE = min(high + end , _lowercase ) return prime print(sieve(10**6))
709
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__( datasets.Metric ): def _a ( self : Any ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]="binary" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="warn" , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = recall_score( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , zero_division=__SCREAMING_SNAKE_CASE , ) return {"recall": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
690
0
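The code field above implements a segmented Sieve of Eratosthenes. A self-contained sketch of the same idea follows; it is not a line-for-line reconstruction (the first-window sieve is written out explicitly) and the names are assumed.

import math

def segmented_sieve(n):
    # Sieve primes up to n one sqrt(n)-sized window at a time, so memory
    # stays O(sqrt(n)) instead of O(n).
    limit = int(math.sqrt(n))
    primes = []
    is_prime = [True] * (limit + 1)
    for p in range(2, limit + 1):  # plain sieve on the first window [2, limit]
        if is_prime[p]:
            primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    base_primes = list(primes)
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:  # slide the window across (limit, n]
        window = [True] * (high - low + 1)
        for p in base_primes:
            start = max(p * p, (low + p - 1) // p * p)  # first multiple of p >= low
            for multiple in range(start, high + 1, p):
                window[multiple - low] = False
        primes.extend(low + i for i, flag in enumerate(window) if flag)
        low, high = high + 1, min(high + limit, n)
    return primes

print(segmented_sieve(50))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]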
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class A__( UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase = '''swin''' lowerCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=2_24 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=96 , __SCREAMING_SNAKE_CASE : Optional[int]=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE : Any=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=4.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-5 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Optional[int]: """simple docstring""" super().__init__(**__lowerCAmelCase ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embed_dim __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = len(__lowerCAmelCase ) __SCREAMING_SNAKE_CASE = num_heads __SCREAMING_SNAKE_CASE = window_size __SCREAMING_SNAKE_CASE = mlp_ratio __SCREAMING_SNAKE_CASE = qkv_bias __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = use_absolute_embeddings __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) ) __SCREAMING_SNAKE_CASE = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(__lowerCAmelCase ) + 1 )] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names ) class A__( UpperCAmelCase__ ): lowerCAmelCase = version.parse('''1.11''' ) @property def _a ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Optional[int] ) -> float: """simple docstring""" return 1E-4
710
"""simple docstring""" def _a ( UpperCAmelCase__ = 10**9 ) -> int: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'''{solution() = }''')
690
0
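The style_context field above (the perimeter recurrence) reads as the Project Euler problem on almost-equilateral triangles (a, a, a +/- 1) with integral area; that interpretation is an inference from the perimeter sequence the loop generates (16, 50, 196, 722, ...), not something stated in the row. A named-variable sketch:

def perimeter_sum(max_perimeter=10**9):
    # Walk the family of almost-equilateral triangles with integral area;
    # the recurrence yields their perimeters 16, 50, 196, 722, ...
    prev_value, value = 1, 2
    i = perimeter = perimeters_sum = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a + 1) and (a, a, a - 1) shapes.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum

print(perimeter_sum(1000))  # 16 + 50 + 196 + 722 = 984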
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__( __UpperCAmelCase , unittest.TestCase ): lowerCAmelCase = KandinskyVaaPipeline lowerCAmelCase = [ """image_embeds""", """negative_image_embeds""", ] lowerCAmelCase = ["""image_embeds""", """negative_image_embeds"""] lowerCAmelCase = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] lowerCAmelCase = False @property def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" return 32 @property def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return 32 @property def _a ( self : str ) -> List[str]: """simple docstring""" return self.time_input_dim @property def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.time_input_dim * 4 @property def _a ( self : Tuple ) -> Tuple: """simple docstring""" return 1_00 @property def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCAmelCase_ ) return model @property def _a ( self : str ) -> Any: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _a ( self : Any ) -> Dict: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_unet __SCREAMING_SNAKE_CASE = self.dummy_movq __SCREAMING_SNAKE_CASE = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=UpperCAmelCase_ , ) __SCREAMING_SNAKE_CASE = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _a ( self : Tuple 
, __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=0 ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCAmelCase_ ) if str(UpperCAmelCase_ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = { '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = pipe( **self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : int ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' ) __SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = KandinskyVaaPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = pipeline.to(UpperCAmelCase_ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = '''red cat, 4k photo''' __SCREAMING_SNAKE_CASE = torch.Generator(device='''cuda''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pipe_prior( UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __SCREAMING_SNAKE_CASE = torch.Generator(device='''cuda''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipeline( image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=1_00 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (5_12, 5_12, 3) 
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
711
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase__ =pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase__ =dataset.iloc[:, 1:2].values lowerCAmelCase__ =dataset.iloc[:, 2].values lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase__ =PolynomialFeatures(degree=4) lowerCAmelCase__ =poly_reg.fit_transform(X) lowerCAmelCase__ =LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> List[Any]: plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' ) plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
690
0
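The polynomial-regression field above fits degree-4 features with scikit-learn; a minimal self-contained version of the same fit/predict flow, with synthetic data standing in for the remote CSV (the quartic toy salaries are an assumption made purely for illustration):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(1, 11).reshape(-1, 1)    # stand-in for the 10 position levels
y = 45_000 + 2_500 * X.ravel() ** 4    # synthetic salaries with a quartic trend
poly_reg = PolynomialFeatures(degree=4)
pol_reg = LinearRegression().fit(poly_reg.fit_transform(X), y)
print(pol_reg.predict(poly_reg.transform([[5.5]])))  # interpolated salary at level 5.5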
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib lowerCAmelCase__ ={ 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } lowerCAmelCase__ =logging.WARNING def _a ( ) -> List[str]: __SCREAMING_SNAKE_CASE = os.getenv('''DATASETS_VERBOSITY''' , _lowercase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def _a ( ) -> Optional[int]: return __name__.split('''.''' )[0] def _a ( ) -> Optional[Any]: return logging.getLogger(_get_library_name() ) def _a ( ) -> List[Any]: # Apply our default configuration to the library root logger. __SCREAMING_SNAKE_CASE = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def _a ( ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def _a ( UpperCAmelCase__ = None ) -> Optional[int]: if name is None: __SCREAMING_SNAKE_CASE = _get_library_name() return logging.getLogger(_lowercase ) def _a ( ) -> List[str]: return _get_library_root_logger().getEffectiveLevel() def _a ( UpperCAmelCase__ ) -> List[Any]: _get_library_root_logger().setLevel(_lowercase ) def _a ( ) -> Union[str, Any]: return set_verbosity(_lowercase ) def _a ( ) -> Tuple: return set_verbosity(_lowercase ) def _a ( ) -> Any: return set_verbosity(_lowercase ) def _a ( ) -> Dict: return set_verbosity(_lowercase ) def _a ( ) -> Dict: __SCREAMING_SNAKE_CASE = False def _a ( ) -> List[Any]: __SCREAMING_SNAKE_CASE = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class A__: def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : int ) -> str: # pylint: disable=unused-argument """simple docstring""" __SCREAMING_SNAKE_CASE = args[0] if args else None def __iter__( self : Any ) -> Any: """simple docstring""" return iter(self._iterator ) def __getattr__( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" def empty_fn(*__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Tuple ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : List[Any] ) -> Optional[Any]: """simple docstring""" return self def __exit__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]: """simple docstring""" return lowerCAmelCase__ =True class A__: def __call__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=False , **__SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*UpperCamelCase__ , **UpperCamelCase__ ) else: return EmptyTqdm(*UpperCamelCase__ , **UpperCamelCase__ ) def _a ( self : Tuple , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*UpperCamelCase__ , **UpperCamelCase__ ) def _a ( self : Dict ) -> Optional[Any]: """simple 
docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() lowerCAmelCase__ =_tqdm_cls() def _a ( ) -> Tuple: global _tqdm_active return bool(_tqdm_active ) def _a ( ) -> Any: global _tqdm_active __SCREAMING_SNAKE_CASE = True def _a ( ) -> Dict: global _tqdm_active __SCREAMING_SNAKE_CASE = False
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = '''gelu''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : List[str]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() __SCREAMING_SNAKE_CASE = 
inputs_dict['''input_ids'''] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''attention_mask'''][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''head_mask'''] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Optional[int]: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] lowerCAmelCase = '''facebook/mbart-large-en-ro''' @cached_property def _a ( self : Optional[int] ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self._assert_generated_batch_equal_expected()
690
0
"""simple docstring""" import collections import os import re from pathlib import Path lowerCAmelCase__ ="src/transformers" # Matches is_xxx_available() lowerCAmelCase__ =re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowerCAmelCase__ =re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase__ =re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowerCAmelCase__ =re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase__ =re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase__ =re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase__ =re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase__ =re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowerCAmelCase__ =re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowerCAmelCase__ =re.compile(r"^\s*try:") # Catches a line with else: lowerCAmelCase__ =re.compile(r"^\s*else:") def _a ( UpperCAmelCase__ ) -> Union[str, Any]: if _re_test_backend.search(__UpperCamelCase ) is None: return None __SCREAMING_SNAKE_CASE = [b[0] for b in _re_backend.findall(__UpperCamelCase )] backends.sort() return "_and_".join(__UpperCamelCase ) def _a ( UpperCAmelCase__ ) -> List[str]: with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE = f.readlines() __SCREAMING_SNAKE_CASE = 0 while line_index < len(__UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure __SCREAMING_SNAKE_CASE = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: __SCREAMING_SNAKE_CASE = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__UpperCamelCase ): __SCREAMING_SNAKE_CASE = _re_one_line_import_struct.search(__UpperCamelCase ).groups()[0] __SCREAMING_SNAKE_CASE = re.findall(r'''\[([^\]]+)\]''' , __UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue __SCREAMING_SNAKE_CASE = _re_import_struct_key_value.search(__UpperCamelCase ) if single_line_import_search is not None: __SCREAMING_SNAKE_CASE = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCamelCase ) > 0] objects.extend(__UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''\"''' ): objects.append(line[9:-3] ) line_index += 1 __SCREAMING_SNAKE_CASE = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__SCREAMING_SNAKE_CASE = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __SCREAMING_SNAKE_CASE = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __SCREAMING_SNAKE_CASE = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): __SCREAMING_SNAKE_CASE = lines[line_index] if _re_import_struct_add_one.search(__UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(__UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__UpperCamelCase ) is not None: __SCREAMING_SNAKE_CASE = _re_import_struct_add_many.search(__UpperCamelCase ).groups()[0].split(''', ''' ) __SCREAMING_SNAKE_CASE = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0] objects.extend(__UpperCamelCase ) elif _re_between_brackets.search(__UpperCamelCase ) is not None: __SCREAMING_SNAKE_CASE = _re_between_brackets.search(__UpperCamelCase ).groups()[0].split(''', ''' ) __SCREAMING_SNAKE_CASE = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0] objects.extend(__UpperCamelCase ) elif _re_quote_object.search(__UpperCamelCase ) is not None: objects.append(_re_quote_object.search(__UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''\"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''\"''' ): objects.append(line[13:-3] ) line_index += 1 __SCREAMING_SNAKE_CASE = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __SCREAMING_SNAKE_CASE = [] while ( line_index < len(__UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): __SCREAMING_SNAKE_CASE = lines[line_index] __SCREAMING_SNAKE_CASE = _re_import.search(__UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 __SCREAMING_SNAKE_CASE = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(__UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
__SCREAMING_SNAKE_CASE = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __SCREAMING_SNAKE_CASE = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __SCREAMING_SNAKE_CASE = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): __SCREAMING_SNAKE_CASE = lines[line_index] __SCREAMING_SNAKE_CASE = _re_import.search(__UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 __SCREAMING_SNAKE_CASE = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: def find_duplicates(UpperCAmelCase__ ): return [k for k, v in collections.Counter(__UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __SCREAMING_SNAKE_CASE = [] for key in import_dict_objects.keys(): __SCREAMING_SNAKE_CASE = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) __SCREAMING_SNAKE_CASE = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __SCREAMING_SNAKE_CASE = '''base imports''' if key == '''none''' else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def _a ( ) -> Optional[int]: __SCREAMING_SNAKE_CASE = [] for root, _, files in os.walk(__UpperCamelCase ): if "__init__.py" in files: __SCREAMING_SNAKE_CASE = os.path.join(__UpperCamelCase , '''__init__.py''' ) __SCREAMING_SNAKE_CASE = parse_init(__UpperCamelCase ) if objects is not None: __SCREAMING_SNAKE_CASE = analyze_results(*__UpperCamelCase ) if len(__UpperCamelCase ) > 0: __SCREAMING_SNAKE_CASE = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(__UpperCamelCase ) ) if len(__UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(__UpperCamelCase ) ) def _a ( ) -> int: __SCREAMING_SNAKE_CASE = [] for path, directories, files in os.walk(__UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(__UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue __SCREAMING_SNAKE_CASE = str((Path(__UpperCamelCase ) / folder).relative_to(__UpperCamelCase ) ) __SCREAMING_SNAKE_CASE = short_path.replace(os.path.sep , '''.''' ) submodules.append(__UpperCamelCase ) for fname in files: if fname == "__init__.py": continue __SCREAMING_SNAKE_CASE = str((Path(__UpperCamelCase ) / 
fname).relative_to(__UpperCamelCase ) ) __SCREAMING_SNAKE_CASE = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(__UpperCamelCase ) return submodules lowerCAmelCase__ =[ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def _a ( ) -> int: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import __SCREAMING_SNAKE_CASE = direct_transformers_import(__UpperCamelCase ) __SCREAMING_SNAKE_CASE = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: __SCREAMING_SNAKE_CASE = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , __UpperCamelCase ) ) ) __SCREAMING_SNAKE_CASE = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__UpperCamelCase ) > 0: __SCREAMING_SNAKE_CASE = '''\n'''.join(f"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' f"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
713
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class A__( __magic_name__ ): lowerCAmelCase = '''van''' def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=2_24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : str=[64, 1_28, 3_20, 5_12] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE : Dict=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : Any=1E-2 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_sizes __SCREAMING_SNAKE_CASE = strides __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = mlp_ratios __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = layer_scale_init_value __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = dropout_rate
690
0
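The init-checker field above hinges on its find_backend helper, which pairs a guard-detection regex with a backend-name regex; a self-contained demo of that helper follows, with both regexes copied verbatim from the snippet (including the empty trailing group that makes findall return tuples, hence the b[0] indexing).

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

def find_backend(line):
    # Return the normalized backend name for an `if not is_xxx_available()` guard.
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]  # b is (name, '') per the empty group
    backends.sort()
    return "_and_".join(backends)

print(find_backend("if not is_torch_available():"))  # torch
print(find_backend("else:"))                         # None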
"""simple docstring""" def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=False ) -> Tuple: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = len(set_a.intersection(UpperCAmelCase__ ) ) if alternative_union: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) + len(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = len(set_a.union(UpperCAmelCase__ ) ) return intersection / union if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(UpperCAmelCase__ , (list, tuple) ): __SCREAMING_SNAKE_CASE = [element for element in set_a if element in set_b] if alternative_union: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) + len(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) / union else: __SCREAMING_SNAKE_CASE = set_a + [element for element in set_b if element not in set_a] return len(UpperCAmelCase__ ) / len(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) / len(UpperCAmelCase__ ) return None if __name__ == "__main__": lowerCAmelCase__ ={"a", "b", "c", "d", "e"} lowerCAmelCase__ ={"c", "d", "e", "f", "h", "i"} print(jaccard_similarity(set_a, set_b))
714
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ ={"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
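A readable restatement of the Jaccard routine in the code field above; the set-typed branch is reconstructed from context (the obfuscation collapsed its isinstance arguments), so treat that branch as an assumption.

def jaccard_similarity(set_a, set_b, alternative_union=False):
    # Sets: |A intersect B| over |A union B|, or over |A| + |B| when
    # alternative_union is requested.
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a & set_b)
        union = len(set_a) + len(set_b) if alternative_union else len(set_a | set_b)
        return intersection / union
    # Ordered sequences: preserve order and duplicates, mirroring the original.
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [x for x in set_a if x in set_b]
        if alternative_union:
            return len(intersection) / (len(set_a) + len(set_b))
        union = list(set_a) + [x for x in set_b if x not in set_a]
        return len(intersection) / len(union)
    return None

print(jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}))  # 0.375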
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class A__( lowercase__ ): lowerCAmelCase = """roformer""" def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=5_00_00 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=7_68 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : int=30_72 , __SCREAMING_SNAKE_CASE : Tuple="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=15_36 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-1_2 , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=True , **__SCREAMING_SNAKE_CASE : Dict , ) -> Tuple: """simple docstring""" super().__init__(pad_token_id=__lowercase , **__lowercase ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size if embedding_size is None else embedding_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = rotary_value __SCREAMING_SNAKE_CASE = use_cache class A__( lowercase__ ): @property def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''sequence'''} __SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __snake_case ): lowerCAmelCase = ['input_values', 'padding_mask'] def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any = 1 , __SCREAMING_SNAKE_CASE : int = 2_40_00 , __SCREAMING_SNAKE_CASE : List[Any] = 0.0 , __SCREAMING_SNAKE_CASE : Any = None , __SCREAMING_SNAKE_CASE : Any = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Optional[int]: """simple docstring""" super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ ) __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap @property def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = False , __SCREAMING_SNAKE_CASE : Optional[Any] = None , __SCREAMING_SNAKE_CASE : Dict = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> Any: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''' ) elif padding is None: # by default let's pad the inputs __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = bool( isinstance(A_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(A_ , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(A_ , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(A_ ).T] # verify inputs are valid for idx, example in enumerate(A_ ): if example.ndim > 2: raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BatchFeature({'''input_values''': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length __SCREAMING_SNAKE_CASE = "max_length" else: __SCREAMING_SNAKE_CASE = input_values # normal padding on batch if padded_inputs is None: __SCREAMING_SNAKE_CASE = self.pad( A_ , max_length=A_ , truncation=A_ , padding=A_ , return_attention_mask=A_ , ) if padding: __SCREAMING_SNAKE_CASE = padded_inputs.pop('''attention_mask''' ) __SCREAMING_SNAKE_CASE = [] for example in padded_inputs.pop('''input_values''' ): if self.feature_size == 1: __SCREAMING_SNAKE_CASE = example[..., None] input_values.append(example.T ) __SCREAMING_SNAKE_CASE = input_values if return_tensors is not None: __SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(A_ ) return padded_inputs
716
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A__( unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = CLIPConfig() # Create a dummy config file with image_proceesor_type __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict() config_dict.pop('''image_processor_type''' ) __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # make sure private variable is not incorrectly saved __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) 
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> str: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def _a ( self : Dict ) -> Dict: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _a ( self : int ) -> Any: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _a ( self : int ) -> List[Any]: """simple docstring""" class A__( __magic_name__ ): lowerCAmelCase = True try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
690
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class A__( UpperCamelCase__ ): lowerCAmelCase = """dpr""" def __init__( self : int , __SCREAMING_SNAKE_CASE : Tuple=3_05_22 , __SCREAMING_SNAKE_CASE : int=7_68 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : List[Any]=30_72 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=5_12 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-1_2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Optional[Any]="absolute" , __SCREAMING_SNAKE_CASE : Tuple = 0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Tuple: """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = projection_dim __SCREAMING_SNAKE_CASE = position_embedding_type
717
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
690
0
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
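# A minimal usage sketch of radix_sort: the sort is in place and assumes
# non-negative integers.
example = [170, 45, 75, 90, 802, 24, 2, 66]
assert radix_sort(example) == [2, 24, 45, 66, 75, 90, 170, 802]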
718
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
690
0
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow lowerCAmelCase__ =logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class A__( unittest.TestCase ): def _a ( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = True , ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [file for file in os.listdir(lowerCAmelCase_ ) if os.path.isfile(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )] if identifier is not None: __SCREAMING_SNAKE_CASE = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for n_ in n_identifier: __SCREAMING_SNAKE_CASE = [file for file in files if n_ not in file] else: __SCREAMING_SNAKE_CASE = [file for file in files if n_identifier not in file] __SCREAMING_SNAKE_CASE = ignore_files or [] ignore_files.append('''__init__.py''' ) __SCREAMING_SNAKE_CASE = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , lowerCAmelCase_ ) if only_modules: __SCREAMING_SNAKE_CASE = file.split('''.''' )[0] try: __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = doctest.DocTestSuite(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = unittest.TextTestRunner().run(lowerCAmelCase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f"""{module_identifier} is not a module.""" ) else: __SCREAMING_SNAKE_CASE = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = Path('''src/transformers''' ) __SCREAMING_SNAKE_CASE = '''modeling''' __SCREAMING_SNAKE_CASE = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ , ignore_files=lowerCAmelCase_ ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = Path('''src/transformers''' ) __SCREAMING_SNAKE_CASE = '''tokenization''' self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ ) def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = Path('''src/transformers''' ) __SCREAMING_SNAKE_CASE = '''configuration''' self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ ) def _a ( self : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = Path('''src/transformers''' ) __SCREAMING_SNAKE_CASE = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(lowerCAmelCase_ , n_identifier=lowerCAmelCase_ ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = Path('''docs/source''' ) __SCREAMING_SNAKE_CASE = ['''favicon.ico'''] self.analyze_directory(lowerCAmelCase_ , ignore_files=lowerCAmelCase_ , only_modules=lowerCAmelCase_ )
719
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" from collections.abc import Callable class A__: def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Callable | None = None ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [] # Stores indexes of each item for supporting updates and deletion. __SCREAMING_SNAKE_CASE = {} # Stores current size of heap. __SCREAMING_SNAKE_CASE = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __SCREAMING_SNAKE_CASE = key or (lambda __SCREAMING_SNAKE_CASE : x) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> int | None: """simple docstring""" return int((i - 1) / 2 ) if i > 0 else None def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> int | None: """simple docstring""" __SCREAMING_SNAKE_CASE = int(2 * i + 1 ) return left if 0 < left < self.size else None def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> int | None: """simple docstring""" __SCREAMING_SNAKE_CASE = int(2 * i + 2 ) return right if 0 < right < self.size else None def _a ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __SCREAMING_SNAKE_CASE = self.arr[j], self.arr[i] def _a ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> bool: """simple docstring""" return self.arr[i][1] < self.arr[j][1] def _a ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self._left(__a ) __SCREAMING_SNAKE_CASE = self._right(__a ) __SCREAMING_SNAKE_CASE = i if left is not None and not self._cmp(__a , __a ): __SCREAMING_SNAKE_CASE = left if right is not None and not self._cmp(__a , __a ): __SCREAMING_SNAKE_CASE = right return valid_parent def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self._parent(__a ) while parent is not None and not self._cmp(__a , __a ): self._swap(__a , __a ) __SCREAMING_SNAKE_CASE = parent, self._parent(__a ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self._get_valid_parent(__a ) while valid_parent != index: self._swap(__a , __a ) __SCREAMING_SNAKE_CASE = valid_parent, self._get_valid_parent(__a ) def _a ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" if item not in self.pos_map: return __SCREAMING_SNAKE_CASE = self.pos_map[item] __SCREAMING_SNAKE_CASE = [item, self.key(__a )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(__a ) self._heapify_down(__a ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" if item not in self.pos_map: return __SCREAMING_SNAKE_CASE = self.pos_map[item] del self.pos_map[item] __SCREAMING_SNAKE_CASE = self.arr[self.size - 1] __SCREAMING_SNAKE_CASE = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(__a ) self._heapify_down(__a ) def _a ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(__a )] ) else: __SCREAMING_SNAKE_CASE = [item, self.key(__a )] __SCREAMING_SNAKE_CASE = self.size self.size += 1 self._heapify_up(self.size - 1 ) def _a ( self : Union[str, Any] ) -> tuple | None: """simple docstring""" return self.arr[0] if self.size else None def _a ( self : Tuple ) -> tuple | None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _a ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
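# A minimal usage sketch. insert_item/update_item/extract_top are the
# reconstructed public names; scores order the heap with the largest on top.
heap = Heap()
heap.insert_item(5, 34)
heap.insert_item(6, 31)
heap.insert_item(7, 37)
print(heap.get_top())      # [7, 37]
heap.update_item(6, 40)
print(heap.extract_top())  # [6, 40]
print(heap.get_top())      # [7, 37]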
720
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
690
0
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") lowerCAmelCase__ ={"target_lang": "fi", "source_lang": "en"} lowerCAmelCase__ =">>zh<<" lowerCAmelCase__ ="Helsinki-NLP/" if is_torch_available(): lowerCAmelCase__ ="pt" elif is_tf_available(): lowerCAmelCase__ ="tf" else: lowerCAmelCase__ ="jax" @require_sentencepiece class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = MarianTokenizer lowerCAmelCase = False lowerCAmelCase = True def _a ( self : List[Any] ) -> int: """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] __SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) __SCREAMING_SNAKE_CASE = Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab'''] ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''source_spm'''] ) copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''target_spm'''] ) __SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> MarianTokenizer: """simple docstring""" return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" return ( "This is a test", "This is a test", ) def _a ( self : Any ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = '''</s>''' __SCREAMING_SNAKE_CASE = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(UpperCAmelCase__ ) , 9 ) def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" ) __SCREAMING_SNAKE_CASE = en_de_tokenizer(['''I am a small frog'''] , return_tensors=UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(UpperCAmelCase__ , batch.input_ids[0] ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [x.name for x in Path(UpperCAmelCase__ ).glob('''*''' )] self.assertIn('''source.spm''' , 
UpperCAmelCase__ ) MarianTokenizer.from_pretrained(UpperCAmelCase__ ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = tok( ['''I am a small frog''' * 10_00, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def _a ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , ) def _a ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' ) __SCREAMING_SNAKE_CASE = '''Tämä on testi''' __SCREAMING_SNAKE_CASE = '''This is a test''' __SCREAMING_SNAKE_CASE = [76, 7, 20_47, 2] __SCREAMING_SNAKE_CASE = [69, 12, 11, 9_40, 2] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ ).input_ids self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer(text_target=UpperCAmelCase__ ).input_ids self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
721
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
690
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class A__( unittest.TestCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : List[str]=18 , __SCREAMING_SNAKE_CASE : Any=30 , __SCREAMING_SNAKE_CASE : List[str]=4_00 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : str=True , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_flip_channel_order def _a ( self : List[Any] ) -> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class A__( __a , unittest.TestCase ): lowerCAmelCase = MobileViTImageProcessor if is_vision_available() else None def _a ( self : List[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MobileViTImageProcessingTester(self ) @property def _a ( self : Union[str, Any] ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A__ , '''do_resize''' ) ) self.assertTrue(hasattr(A__ , '''size''' ) ) self.assertTrue(hasattr(A__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(A__ , '''center_crop''' ) ) self.assertTrue(hasattr(A__ , '''do_flip_channel_order''' ) ) def _a ( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def _a ( self : Tuple ) -> Any: """simple docstring""" pass def _a ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ ) for image in image_inputs: self.assertIsInstance(A__ , 
Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _a ( self : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
700
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _a ( UpperCAmelCase__ ) -> dict[str, str]: __SCREAMING_SNAKE_CASE = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __SCREAMING_SNAKE_CASE = remove_duplicates(key.upper() ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # First fill cipher with key characters __SCREAMING_SNAKE_CASE = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 26 ): __SCREAMING_SNAKE_CASE = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __SCREAMING_SNAKE_CASE = alphabet[i - offset] __SCREAMING_SNAKE_CASE = char return cipher_alphabet def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = input('''Enter message to encode or decode: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Enter keyword: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __SCREAMING_SNAKE_CASE = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __SCREAMING_SNAKE_CASE = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
690
0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class A__( unittest.TestCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Dict=30 , __SCREAMING_SNAKE_CASE : List[str]=4_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=1 / 2_55 , __SCREAMING_SNAKE_CASE : str=True , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_pad def _a ( self : str ) -> List[str]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=False ) -> List[Any]: """simple docstring""" if not batched: __SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: __SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * h / w ) __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] elif w > h: __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] __SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * w / h ) else: __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0] __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = DeformableDetrImageProcessor if is_vision_available() else None def _a ( self : str ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = DeformableDetrImageProcessingTester(self ) @property def _a ( self : Any ) -> 
Optional[Any]:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _a ( self : Dict ) -> List[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_rescale''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )

    def _a ( self : Tuple ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )

    def _a ( self : List[Any] ) -> List[str]:
        """simple docstring"""
        pass

    def _a ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _a ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _a ( self : Dict ) -> List[str]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def _a ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            __SCREAMING_SNAKE_CASE = json.loads(f.read() )
        __SCREAMING_SNAKE_CASE = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        __SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor()
        __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        # verify pixel values
        __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
        # verify area
        __SCREAMING_SNAKE_CASE = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) )
        # verify boxes
        __SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # verify image_id
        __SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) )
        # verify is_crowd
        __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) )
        # verify class_labels
        __SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) )
        # verify orig_size
        __SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) )
        # verify size
        __SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )

    @slow
    def _a ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            __SCREAMING_SNAKE_CASE = json.loads(f.read() )
        __SCREAMING_SNAKE_CASE = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        __SCREAMING_SNAKE_CASE = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        __SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor(format='''coco_panoptic''' )
        __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        # verify pixel values
        __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
        # verify area
        __SCREAMING_SNAKE_CASE = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) )
        # verify boxes
        __SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # verify image_id
        __SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) )
        # verify is_crowd
        __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) )
        # verify class_labels
        __SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) )
        # verify masks
        __SCREAMING_SNAKE_CASE = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __SCREAMING_SNAKE_CASE )
        # verify orig_size
        __SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) )
        # verify size
        __SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
701
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__: def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[Any] ) -> Any: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _a ( self : Dict ) -> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]={} ): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = TFRegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def 
_a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : List[Any] ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
690
0
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> Union[str, Any]: if not head: return True # split the list to two parts __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = head.next, head while fast and fast.next: __SCREAMING_SNAKE_CASE = fast.next.next __SCREAMING_SNAKE_CASE = slow.next __SCREAMING_SNAKE_CASE = slow.next __SCREAMING_SNAKE_CASE = None # Don't forget here! But forget still works! # reverse the second part __SCREAMING_SNAKE_CASE = None while second: __SCREAMING_SNAKE_CASE = second.next __SCREAMING_SNAKE_CASE = node __SCREAMING_SNAKE_CASE = second __SCREAMING_SNAKE_CASE = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False __SCREAMING_SNAKE_CASE = node.next __SCREAMING_SNAKE_CASE = head.next return True def _a ( UpperCAmelCase__ ) -> Tuple: if not head or not head.next: return True # 1. Get the midpoint (slow) __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = head while fast and fast.next: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fast.next.next, slow.next # 2. Push the second half into the stack __SCREAMING_SNAKE_CASE = [slow.val] while slow.next: __SCREAMING_SNAKE_CASE = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False __SCREAMING_SNAKE_CASE = cur.next return True def _a ( UpperCAmelCase__ ) -> Any: if not head or not head.next: return True __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = 0 while head: if head.val in d: d[head.val].append(lowercase__ ) else: __SCREAMING_SNAKE_CASE = [pos] __SCREAMING_SNAKE_CASE = head.next pos += 1 __SCREAMING_SNAKE_CASE = pos - 1 __SCREAMING_SNAKE_CASE = 0 for v in d.values(): if len(lowercase__ ) % 2 != 0: middle += 1 else: __SCREAMING_SNAKE_CASE = 0 for i in range(0 , len(lowercase__ ) ): if v[i] + v[len(lowercase__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
702
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = XLMRobertaTokenizer lowerCAmelCase = XLMRobertaTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<pad>''' __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_02 ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', 
'''<unk>''', '''.''', ] , ) def _a ( self : int ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) @cached_property def _a ( self : Union[str, Any] ) -> List[str]: """simple 
docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE ) pickle.loads(__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.''' __SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''Hello World!''' __SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __SCREAMING_SNAKE_CASE = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
690
0
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class A__( unittest.TestCase ): def _a ( self : Optional[Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __SCREAMING_SNAKE_CASE = Vector() def _a ( self : List[Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(lowercase__ ) , '''(0,0,0,0,0,1)''' ) def _a ( self : Union[str, Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2, 3, 4] ) self.assertEqual(len(lowercase__ ) , 4 ) def _a ( self : Any ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2] ) __SCREAMING_SNAKE_CASE = Vector([1, 2, 3, 4, 5] ) __SCREAMING_SNAKE_CASE = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __SCREAMING_SNAKE_CASE = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 ) def _a ( self : Dict ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2, 3] ) __SCREAMING_SNAKE_CASE = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def _a ( self : Dict ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2, 3] ) __SCREAMING_SNAKE_CASE = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def _a ( self : Any ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2, 3] ) __SCREAMING_SNAKE_CASE = Vector([2, -1, 4] ) # for test of dot product __SCREAMING_SNAKE_CASE = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def _a ( self : Any ) -> None: """simple docstring""" self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 ) def _a ( self : Dict ) -> None: """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def _a ( self : str ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 2, 3] ) __SCREAMING_SNAKE_CASE = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , lowercase__ , lowercase__ ) ) , '''(3,4,7)''' ) def _a ( self : List[Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 0, 0, 0, 0, 0] ) __SCREAMING_SNAKE_CASE = x.copy() self.assertEqual(str(lowercase__ ) , str(lowercase__ ) ) def _a ( self : Optional[int] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(lowercase__ ) , '''(0,1,0)''' ) def _a ( self : Any ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(lowercase__ ) ) def _a ( self : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __SCREAMING_SNAKE_CASE = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(lowercase__ , lowercase__ ) ) def _a ( self : Dict ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = 
Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __SCREAMING_SNAKE_CASE = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(lowercase__ , lowercase__ ) ) def _a ( self : Optional[int] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def _a ( self : List[Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __SCREAMING_SNAKE_CASE = Vector([1, 2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def _a ( self : Union[str, Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(lowercase__ ) ) def _a ( self : Union[str, Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def _a ( self : str ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def _a ( self : Any ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __SCREAMING_SNAKE_CASE = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def _a ( self : int ) -> None: """simple docstring""" self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> Optional[Any]: return "".join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE__ )] ) def _a ( UpperCAmelCase__ ) -> str: if (len(SCREAMING_SNAKE_CASE__ ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE__ ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
704
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) if "model" in sd.keys(): __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model'''] # pop unnecessary weights __SCREAMING_SNAKE_CASE = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __SCREAMING_SNAKE_CASE = sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __SCREAMING_SNAKE_CASE = sd[key] # We split QKV in separate Q,K,V __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) __SCREAMING_SNAKE_CASE = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(UpperCAmelCase__ , depth // 3 , dim=0 ) __SCREAMING_SNAKE_CASE = q __SCREAMING_SNAKE_CASE = k __SCREAMING_SNAKE_CASE = v del sd[key] return sd @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_checkpoint(UpperCAmelCase__ ) if config is not None: __SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = OPTConfig() __SCREAMING_SNAKE_CASE = OPTModel(UpperCAmelCase__ ).half().eval() model.load_state_dict(UpperCAmelCase__ ) # Check results Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") lowerCAmelCase__ =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
690
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

lowerCAmelCase__ ={
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ =["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ =["LayoutLMv2FeatureExtractor"]
    lowerCAmelCase__ =["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ =[
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__( __magic_name__ ): lowerCAmelCase = '''naver-clova-ix/donut-base-finetuned-docvqa''' lowerCAmelCase = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) lowerCAmelCase = '''document_qa''' lowerCAmelCase = AutoProcessor lowerCAmelCase = VisionEncoderDecoderModel lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''text'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : "Image" , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __SCREAMING_SNAKE_CASE = task_prompt.replace('''{user_input}''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE = self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__SCREAMING_SNAKE_CASE , ).sequences def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __SCREAMING_SNAKE_CASE = re.sub(r'''<.*?>''' , '''''' , __SCREAMING_SNAKE_CASE , count=1 ).strip() 
# remove first task start token __SCREAMING_SNAKE_CASE = self.pre_processor.tokenajson(__SCREAMING_SNAKE_CASE ) return sequence["answer"]
690
0
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""", """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""", } class A__( __magic_name__ ): lowerCAmelCase = "encodec" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int=[1.5, 3.0, 6.0, 12.0, 24.0] , __SCREAMING_SNAKE_CASE : Tuple=2_40_00 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=1_28 , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : List[Any]=[8, 5, 4, 2] , __SCREAMING_SNAKE_CASE : Union[str, Any]="weight_norm" , __SCREAMING_SNAKE_CASE : str=7 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]="reflect" , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=10_24 , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : str=True , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = target_bandwidths __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = audio_channels __SCREAMING_SNAKE_CASE = normalize __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_filters __SCREAMING_SNAKE_CASE = num_residual_layers __SCREAMING_SNAKE_CASE = upsampling_ratios __SCREAMING_SNAKE_CASE = norm_type __SCREAMING_SNAKE_CASE = kernel_size __SCREAMING_SNAKE_CASE = last_kernel_size __SCREAMING_SNAKE_CASE = residual_kernel_size __SCREAMING_SNAKE_CASE = dilation_growth_rate __SCREAMING_SNAKE_CASE = use_causal_conv __SCREAMING_SNAKE_CASE = pad_mode __SCREAMING_SNAKE_CASE = compress __SCREAMING_SNAKE_CASE = num_lstm_layers __SCREAMING_SNAKE_CASE = trim_right_ratio __SCREAMING_SNAKE_CASE = codebook_size __SCREAMING_SNAKE_CASE = codebook_dim if codebook_dim is not None else hidden_size __SCREAMING_SNAKE_CASE = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" ) super().__init__(**__SCREAMING_SNAKE_CASE ) @property def _a ( self : str ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def _a ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def _a ( self : int ) -> int: """simple docstring""" return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
706
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__( unittest.TestCase ): @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _a ( self : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A__( unittest.TestCase ): def _a ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''google/ncsnpp-celebahq-256''' __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __SCREAMING_SNAKE_CASE = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
690
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ ={ "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "spiece.model"} lowerCAmelCase__ ={ "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } lowerCAmelCase__ ={ "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __SCREAMING_SNAKE_CASE = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __SCREAMING_SNAKE_CASE = '''<|endoftext|>''' if eos_token is None else eos_token __SCREAMING_SNAKE_CASE = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token else: __SCREAMING_SNAKE_CASE = '''<pad>''' if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace 
normalization in input texts # fmt : off __SCREAMING_SNAKE_CASE = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __SCREAMING_SNAKE_CASE = re.compile( f"""[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" ) def __getstate__( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.sp_model ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub('''''' , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __SCREAMING_SNAKE_CASE = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __SCREAMING_SNAKE_CASE = unicodedata.normalize('''NFC''' , __SCREAMING_SNAKE_CASE ) return text def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" return out_string def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = '''''' __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Union[str, Any] ) -> Dict[str, int]: """simple docstring""" __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: """simple docstring""" return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __SCREAMING_SNAKE_CASE = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__SCREAMING_SNAKE_CASE ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=__SCREAMING_SNAKE_CASE )
690
0
"""simple docstring""" from __future__ import annotations def maximum_non_adjacent_sum(nums: list[int]) -> int: """Return the maximum sum of non-adjacent elements of ``nums`` (0 for an empty list).""" if not nums: return 0 # best sum that includes the current element, and best sum that excludes it max_including = nums[0] max_excluding = 0 for num in nums[1:]: max_including, max_excluding = ( max_excluding + num, max(max_including, max_excluding), ) return max(max_including, max_excluding) if __name__ == "__main__": import doctest doctest.testmod()
708
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase__ ={"UserAgent": UserAgent().random} def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = script.contents[0] __SCREAMING_SNAKE_CASE = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A__: def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = f"""https://www.instagram.com/{username}/""" __SCREAMING_SNAKE_CASE = self.get_json() def _a ( self : List[Any] ) -> dict: """simple docstring""" __SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__SCREAMING_SNAKE_CASE ).text __SCREAMING_SNAKE_CASE = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Tuple ) -> str: """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def _a ( self : Tuple ) -> str: """simple docstring""" return self.user_data["username"] @property def _a ( self : List[Any] ) -> str: """simple docstring""" return self.user_data["full_name"] @property def _a ( self : Optional[Any] ) -> str: """simple docstring""" return self.user_data["biography"] @property def _a ( self : List[str] ) -> str: """simple docstring""" return self.user_data["business_email"] @property def _a ( self : Any ) -> str: """simple docstring""" return self.user_data["external_url"] @property def _a ( self : Any ) -> int: """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def _a ( self : Dict ) -> int: """simple docstring""" return self.user_data["edge_follow"]["count"] @property def _a ( self : str ) -> int: """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _a ( self : Union[str, Any] ) -> str: """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def _a ( self : Tuple ) -> bool: """simple docstring""" return self.user_data["is_verified"] @property def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.user_data["is_private"] def _a ( UpperCAmelCase__ = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __SCREAMING_SNAKE_CASE = InstagramUser(UpperCAmelCase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , UpperCAmelCase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ =InstagramUser("github") print(instagram_user) print(F'''{instagram_user.number_of_posts = }''') print(F'''{instagram_user.number_of_followers = }''') print(F'''{instagram_user.number_of_followings = }''') print(F'''{instagram_user.email = }''') print(F'''{instagram_user.website = }''') print(F'''{instagram_user.profile_picture_url = }''') print(F'''{instagram_user.is_verified = }''') print(F'''{instagram_user.is_private = }''')
690
0
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class A__( unittest.TestCase ): '''simple docstring''' def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" super().tearDown() gc.collect() def _a ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE = jax.device_count() __SCREAMING_SNAKE_CASE = num_samples * [prompt] __SCREAMING_SNAKE_CASE = sd_pipe.prepare_inputs(_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = replicate(_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = shard(_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE = jax.random.split(_lowerCAmelCase , jax.device_count() ) __SCREAMING_SNAKE_CASE = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) __SCREAMING_SNAKE_CASE = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE = images[0, 2_53:2_56, 2_53:2_56, -1] __SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _a ( self : str ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( _lowerCAmelCase , scheduler=_lowerCAmelCase , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE = scheduler_params __SCREAMING_SNAKE_CASE = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE = jax.device_count() __SCREAMING_SNAKE_CASE = num_samples * [prompt] __SCREAMING_SNAKE_CASE = sd_pipe.prepare_inputs(_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = replicate(_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = shard(_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE = jax.random.split(_lowerCAmelCase , jax.device_count() ) __SCREAMING_SNAKE_CASE = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) __SCREAMING_SNAKE_CASE = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE = images[0, 2_53:2_56, 2_53:2_56, -1] __SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
709
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__( datasets.Metric ): def _a ( self : Any ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]="binary" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="warn" , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = recall_score( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , zero_division=__SCREAMING_SNAKE_CASE , ) return {"recall": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
690
0
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __SCREAMING_SNAKE_CASE = 1_28 elif "12-12" in model_name: __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 12 elif "14-14" in model_name: __SCREAMING_SNAKE_CASE = 14 __SCREAMING_SNAKE_CASE = 14 elif "16-16" in model_name: __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 16 else: raise ValueError('''Model not supported''' ) __SCREAMING_SNAKE_CASE = """huggingface/label-files""" if "speech-commands" in model_name: __SCREAMING_SNAKE_CASE = 35 __SCREAMING_SNAKE_CASE = """speech-commands-v2-id2label.json""" else: __SCREAMING_SNAKE_CASE = 5_27 __SCREAMING_SNAKE_CASE = """audioset-id2label.json""" __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE = {int(snake_case_ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def _a ( UpperCAmelCase__ ) -> Tuple: if "module.v" in name: __SCREAMING_SNAKE_CASE = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: __SCREAMING_SNAKE_CASE = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: __SCREAMING_SNAKE_CASE = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: __SCREAMING_SNAKE_CASE = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: __SCREAMING_SNAKE_CASE = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __SCREAMING_SNAKE_CASE = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: __SCREAMING_SNAKE_CASE = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: __SCREAMING_SNAKE_CASE = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple: for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(snake_case_ ) if "qkv" in key: __SCREAMING_SNAKE_CASE = key.split('''.''' ) __SCREAMING_SNAKE_CASE = int(key_split[3] ) 
__SCREAMING_SNAKE_CASE = config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val[:dim] __SCREAMING_SNAKE_CASE = val[dim : dim * 2] __SCREAMING_SNAKE_CASE = val[-dim:] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def _a ( UpperCAmelCase__ ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(snake_case_ , snake_case_ ) @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=False ) -> List[str]: __SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(snake_case_ ) __SCREAMING_SNAKE_CASE = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __SCREAMING_SNAKE_CASE = model_name_to_url[model_name] __SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' ) # remove some keys remove_keys(snake_case_ ) # rename some keys __SCREAMING_SNAKE_CASE = convert_state_dict(snake_case_ , snake_case_ ) # load 🤗 model __SCREAMING_SNAKE_CASE = ASTForAudioClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __SCREAMING_SNAKE_CASE = -4.2677393 if """speech-commands""" not in model_name else -6.845978 __SCREAMING_SNAKE_CASE = 4.5689974 if """speech-commands""" not in model_name else 5.5654526 __SCREAMING_SNAKE_CASE = 10_24 if """speech-commands""" not in model_name else 1_28 __SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=snake_case_ , std=snake_case_ , max_length=snake_case_ ) if "speech-commands" in model_name: __SCREAMING_SNAKE_CASE = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) __SCREAMING_SNAKE_CASE = dataset[0]["""audio"""]["""array"""] else: __SCREAMING_SNAKE_CASE = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE = torchaudio.load(snake_case_ ) __SCREAMING_SNAKE_CASE = waveform.squeeze().numpy() __SCREAMING_SNAKE_CASE = feature_extractor(snake_case_ , sampling_rate=1_60_00 , return_tensors='''pt''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**snake_case_ ) __SCREAMING_SNAKE_CASE = outputs.logits if model_name == 
"ast-finetuned-audioset-10-10-0.4593": __SCREAMING_SNAKE_CASE = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __SCREAMING_SNAKE_CASE = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __SCREAMING_SNAKE_CASE = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __SCREAMING_SNAKE_CASE = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __SCREAMING_SNAKE_CASE = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __SCREAMING_SNAKE_CASE = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __SCREAMING_SNAKE_CASE = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __SCREAMING_SNAKE_CASE = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(snake_case_ ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(f"""MIT/{model_name}""" ) feature_extractor.push_to_hub(f"""MIT/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="ast-finetuned-audioset-10-10-0.4593", type=str, help="Name of the Audio Spectrogram Transformer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase__ =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
710
"""simple docstring""" def _a ( UpperCAmelCase__ = 10**9 ) -> int: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase__ =250_004 lowerCAmelCase__ =250_020 @require_sentencepiece @require_tokenizers class A__( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): lowerCAmelCase = MBartTokenizer lowerCAmelCase = MBartTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = MBartTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MBartTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() 
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(UpperCamelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(UpperCamelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase__ ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(UpperCamelCase__ ) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(UpperCamelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) ) shutil.rmtree(UpperCamelCase__ ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(UpperCamelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(UpperCamelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) ) shutil.rmtree(UpperCamelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class A__( unittest.TestCase ): lowerCAmelCase = '''facebook/mbart-large-en-ro''' lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] lowerCAmelCase = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 
5_17_12, 2, EN_CODE] @classmethod def _a ( cls : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __SCREAMING_SNAKE_CASE = 1 return cls def _a ( self : Any ) -> List[Any]: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) def _a ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids ) __SCREAMING_SNAKE_CASE = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __SCREAMING_SNAKE_CASE = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ ) def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCamelCase__ ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) def _a ( self : List[str] ) -> List[Any]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] ) def _a ( self : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = MBartTokenizer.from_pretrained(UpperCamelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase__ ) @require_torch def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __SCREAMING_SNAKE_CASE = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __SCREAMING_SNAKE_CASE = 
batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10 , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE = targets['''input_ids'''] __SCREAMING_SNAKE_CASE = shift_tokens_right(UpperCamelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { # A, test, EOS, en_XX '''input_ids''': [[62, 30_34, 2, 25_00_04]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
711
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase__ =pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase__ =dataset.iloc[:, 1:2].values lowerCAmelCase__ =dataset.iloc[:, 2].values lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase__ =PolynomialFeatures(degree=4) lowerCAmelCase__ =poly_reg.fit_transform(X) lowerCAmelCase__ =LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> List[Any]: plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' ) plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
690
0
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class A__( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase = StableUnCLIPPipeline lowerCAmelCase = TEXT_TO_IMAGE_PARAMS lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false lowerCAmelCase = False def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 32 __SCREAMING_SNAKE_CASE = embedder_hidden_size # prior components torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowerCamelCase , num_layers=1 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowerCamelCase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , 
prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL() __SCREAMING_SNAKE_CASE = { # prior components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=0 ) -> str: """simple docstring""" if str(_lowerCamelCase ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(_lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def _a ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase ) @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : Optional[int] ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : List[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe('''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ) -> str: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = '''gelu''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : List[str]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() __SCREAMING_SNAKE_CASE = 
inputs_dict['''input_ids'''] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''attention_mask'''][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''head_mask'''] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Optional[int]: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] lowerCAmelCase = '''facebook/mbart-large-en-ro''' @cached_property def _a ( self : Optional[int] ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self._assert_generated_batch_equal_expected()
690
0
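Both fixtures in the record above rely on the same device-aware seeding idiom inside `get_dummy_inputs`: a `torch.Generator` is bound to the target device, except on Apple's MPS backend, where device-bound generators are not supported and the default CPU generator is seeded instead. Below is a minimal sketch of that pattern, assuming nothing beyond PyTorch itself; `make_generator` is an illustrative name, not part of either test suite.

```python
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    """Return a seeded generator, falling back to the global CPU generator on MPS."""
    if str(device).startswith("mps"):
        # torch.Generator cannot be bound to the MPS device, so the test
        # suites seed the default generator instead.
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))  # deterministic across runs
```

Seeding this way keeps the dummy pipeline runs reproducible across devices, which is what makes the hard-coded output-slice assertions in these suites meaningful.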
"""simple docstring""" import unittest import numpy as np def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , ) -> np.ndarray: __SCREAMING_SNAKE_CASE = np.shape(__snake_case ) __SCREAMING_SNAKE_CASE = np.shape(__snake_case ) __SCREAMING_SNAKE_CASE = np.shape(__snake_case ) if shape_a[0] != shape_b[0]: __SCREAMING_SNAKE_CASE = ( "Expected the same number of rows for A and B. " f"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(__snake_case ) if shape_b[1] != shape_c[1]: __SCREAMING_SNAKE_CASE = ( "Expected the same number of columns for B and C. " f"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(__snake_case ) __SCREAMING_SNAKE_CASE = pseudo_inv if a_inv is None: try: __SCREAMING_SNAKE_CASE = np.linalg.inv(__snake_case ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class A__( unittest.TestCase ): def _a ( self : Tuple ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __SCREAMING_SNAKE_CASE = np.array([[0, 3], [3, 0], [2, 3]] ) __SCREAMING_SNAKE_CASE = np.array([[2, 1], [6, 3]] ) __SCREAMING_SNAKE_CASE = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = np.block([[a, b], [b.T, c]] ) __SCREAMING_SNAKE_CASE = np.linalg.det(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = np.linalg.det(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = np.linalg.det(__SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s ) def _a ( self : List[Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __SCREAMING_SNAKE_CASE = np.array([[0, 3], [3, 0], [2, 3]] ) __SCREAMING_SNAKE_CASE = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __SCREAMING_SNAKE_CASE = np.array([[0, 3], [3, 0], [2, 3]] ) __SCREAMING_SNAKE_CASE = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
713
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class A__( __magic_name__ ): lowerCAmelCase = '''van''' def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=2_24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : str=[64, 1_28, 3_20, 5_12] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE : Dict=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : Any=1E-2 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_sizes __SCREAMING_SNAKE_CASE = strides __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = mlp_ratios __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = layer_scale_init_value __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = dropout_rate
690
0
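The first snippet in the record above computes a Schur complement, but the dump's name-mangling obscures it. Here is a de-mangled sketch of the same routine, assuming the three mangled arguments are the blocks A, B, C of the symmetric matrix M = [[A, B], [B^T, C]]; the accompanying unit test is checking the classical identity det(M) = det(A) * det(S).

```python
import numpy as np

def schur_complement(a: np.ndarray, b: np.ndarray, c: np.ndarray) -> np.ndarray:
    """Return S = C - B^T A^{-1} B for the block matrix [[A, B], [B^T, C]]."""
    if a.shape[0] != b.shape[0]:
        raise ValueError("Expected the same number of rows for A and B.")
    if b.shape[1] != c.shape[1]:
        raise ValueError("Expected the same number of columns for B and C.")
    return c - b.T @ np.linalg.inv(a) @ b

a = np.array([[1.0, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0.0, 3], [3, 0], [2, 3]])
c = np.array([[2.0, 1], [6, 3]])
s = schur_complement(a, b, c)
m = np.block([[a, b], [b.T, c]])
# det(M) factors as det(A) * det(S) whenever A is invertible.
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))
```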
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A__( __lowercase ): lowerCAmelCase = 'ClapFeatureExtractor' lowerCAmelCase = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" super().__init__(__A , __A ) def __call__( self : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.pop('''sampling_rate''' , __A ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: __SCREAMING_SNAKE_CASE = self.tokenizer(__A , return_tensors=__A , **__A ) if audios is not None: __SCREAMING_SNAKE_CASE = self.feature_extractor( __A , sampling_rate=__A , return_tensors=__A , **__A ) if text is not None and audios is not None: __SCREAMING_SNAKE_CASE = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _a ( self : Tuple , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _a ( self : Any , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : int ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _a ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names __SCREAMING_SNAKE_CASE = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
714
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ ={"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
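Both `__init__` modules in this record (and several later ones) end by swapping themselves for a `_LazyModule`, so heavyweight submodules are imported only when a symbol is first touched. A self-contained sketch of the idea follows; the class below is illustrative and much simpler than the real `transformers.utils._LazyModule`, which also handles `TYPE_CHECKING`, whole-submodule access, and module specs.

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported symbols on first attribute access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the module that actually defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        try:
            module_name = self._symbol_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value

math_like = LazyModule("math_like", {"math": ["sqrt", "pi"]})
print(math_like.sqrt(2), math_like.pi)  # "math" is imported only here
```

The payoff is that importing the package stays cheap even when it exposes hundreds of classes guarded by optional dependencies.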
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__( UpperCamelCase__ , unittest.TestCase ): lowerCAmelCase = ShapEPipeline lowerCAmelCase = ['''prompt'''] lowerCAmelCase = ['''prompt'''] lowerCAmelCase = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] lowerCAmelCase = False @property def _a ( self : Optional[Any] ) -> str: """simple docstring""" return 32 @property def _a ( self : Dict ) -> str: """simple docstring""" return 32 @property def _a ( self : str ) -> Tuple: """simple docstring""" return self.time_input_dim * 4 @property def _a ( self : Optional[int] ) -> Any: """simple docstring""" return 8 @property def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def _a ( self : Tuple ) -> Dict: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__A ) @property def _a ( self : str ) -> int: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __SCREAMING_SNAKE_CASE = PriorTransformer(**__A ) return model @property def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __SCREAMING_SNAKE_CASE = ShapERenderer(**__A ) return model def _a ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_prior __SCREAMING_SNAKE_CASE = self.dummy_text_encoder __SCREAMING_SNAKE_CASE = self.dummy_tokenizer __SCREAMING_SNAKE_CASE = self.dummy_renderer __SCREAMING_SNAKE_CASE = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__A , clip_sample=__A , clip_sample_range=1.0 , ) __SCREAMING_SNAKE_CASE = { '''prior''': prior, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''renderer''': renderer, '''scheduler''': scheduler, } return components def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : 
Tuple , __SCREAMING_SNAKE_CASE : List[str]=0 ) -> Optional[int]: """simple docstring""" if str(__A ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__A ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__A ).manual_seed(__A ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''horse''', '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**__A ) __SCREAMING_SNAKE_CASE = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(__A ) ) __SCREAMING_SNAKE_CASE = output.images[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self : Dict ) -> Any: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch_device == '''cpu''' __SCREAMING_SNAKE_CASE = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__A , relax_max_difference=__A , ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**__A ) __SCREAMING_SNAKE_CASE = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__A ) for key in inputs.keys(): if key in self.batch_params: __SCREAMING_SNAKE_CASE = batch_size * [inputs[key]] __SCREAMING_SNAKE_CASE = pipe(**__A , num_images_per_prompt=__A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) __SCREAMING_SNAKE_CASE = ShapEPipeline.from_pretrained('''openai/shap-e''' ) __SCREAMING_SNAKE_CASE = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = torch.Generator(device=__A ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( '''a shark''' , generator=__A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__A , __A )
715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
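The ShapE fast test never compares whole rendered frames; it pins a 3x3 corner slice of one channel against hard-coded values with a loose tolerance. That idiom is easy to extract into a helper. The function and synthetic frame below are mine, sketched to mirror the `image[0, -3:, -3:, -1]` indexing above.

```python
import numpy as np

def assert_slice_close(image: np.ndarray, expected: np.ndarray, atol: float = 1e-2) -> None:
    """Compare only the trailing 3x3 patch of the last channel against a pinned slice."""
    actual = image[0, -3:, -3:, -1].flatten()
    max_diff = float(np.abs(actual - expected).max())
    if max_diff >= atol:
        raise AssertionError(f"max deviation {max_diff:.5f} exceeds atol={atol}")

# Stand-in for output.images from a pipeline run (frames, height, width, channels).
frames = np.full((20, 32, 32, 3), 0.000_392_16)
assert_slice_close(frames, np.full(9, 0.000_392_16))
```

Pinning a slice keeps the fixture fast and the expected values short, at the cost of only spot-checking the output.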
"""simple docstring""" import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class A__( __UpperCAmelCase ): def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''neck_hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads''' ) ) class A__: def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=6_40 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Optional[Any]="silu" , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = last_hidden_size __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = conv_kernel_size __SCREAMING_SNAKE_CASE = output_stride __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = classifier_dropout_prob __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = scope def _a ( self : Dict ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels, pixel_labels def _a ( self : List[Any] ) -> str: """simple docstring""" return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , 
output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MobileViTModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MobileViTForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A__( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowerCAmelCase = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) lowerCAmelCase = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Any ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = MobileViTModelTester(self ) __SCREAMING_SNAKE_CASE = MobileViTConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''' ) def _a ( self : Tuple ) -> Optional[int]: """simple 
docstring""" pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''' ) def _a ( self : str ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''MobileViT does not output attentions''' ) def _a ( self : int ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Tuple ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _a ( self : int ) -> Dict: """simple docstring""" pass def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = outputs.hidden_states __SCREAMING_SNAKE_CASE = 5 self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
__SCREAMING_SNAKE_CASE = 2 for i in range(len(__SCREAMING_SNAKE_CASE ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> int: """simple docstring""" for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = MobileViTModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : int ) -> List[Any]: """simple docstring""" return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None @slow def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) __SCREAMING_SNAKE_CASE = model.to(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.logits # verify the logits __SCREAMING_SNAKE_CASE = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) 
__SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ] , device=__SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) __SCREAMING_SNAKE_CASE = model.to(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu() __SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)] ) __SCREAMING_SNAKE_CASE = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
716
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A__( unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = CLIPConfig() # Create a dummy config file with image_proceesor_type __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict() config_dict.pop('''image_processor_type''' ) __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # make sure private variable is not incorrectly saved __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) 
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> str: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def _a ( self : Dict ) -> Dict: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _a ( self : int ) -> Any: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _a ( self : int ) -> List[Any]: """simple docstring""" class A__( __magic_name__ ): lowerCAmelCase = True try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
690
0
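The MobileViT hidden-states check encodes a small piece of shape arithmetic: the model reports five feature maps, each stage halving the spatial side, so stage i of a 32-pixel input should measure 32 // 2**(i + 1) on a side, and the divisor left over after the loop determines the output stride. The arithmetic in isolation:

```python
image_size = 32  # the tester's default above
num_stages = 5   # MobileViT exposes five hidden states

divisor = 2
for stage in range(num_stages):
    side = image_size // divisor
    print(f"stage {stage}: expected feature map {side}x{side}")
    divisor *= 2

# After the loop divisor == 2 ** (num_stages + 1); the test asserts that the
# configured output_stride equals divisor // 2, i.e. 32 for five stages.
assert divisor // 2 == 32
```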
"""simple docstring""" from math import pi, sqrt, tan def _a ( UpperCAmelCase__ ) -> float: if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def _a ( UpperCAmelCase__ ) -> float: if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def _a ( UpperCAmelCase__ ) -> float: if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __SCREAMING_SNAKE_CASE = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(UpperCAmelCase__ , 2 ) * torus_radius * tube_radius def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def _a ( UpperCAmelCase__ ) -> float: if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __SCREAMING_SNAKE_CASE = (sidea + sidea + sidea) / 2 __SCREAMING_SNAKE_CASE = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def _a ( UpperCAmelCase__ ) -> float: if radius < 0: raise ValueError('''area_circle() only 
accepts non-negative values''' ) return pi * radius**2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if diagonal_a < 0 or diagonal_a < 0: raise ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(F'''Rectangle: {area_rectangle(10, 20) = }''') print(F'''Square: {area_square(10) = }''') print(F'''Triangle: {area_triangle(10, 10) = }''') print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''') print(F'''Parallelogram: {area_parallelogram(10, 20) = }''') print(F'''Rhombus: {area_rhombus(10, 20) = }''') print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''') print(F'''Circle: {area_circle(20) = }''') print(F'''Ellipse: {area_ellipse(10, 20) = }''') print("\nSurface Areas of various geometric shapes: \n") print(F'''Cube: {surface_area_cube(20) = }''') print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''') print(F'''Sphere: {surface_area_sphere(20) = }''') print(F'''Hemisphere: {surface_area_hemisphere(20) = }''') print(F'''Cone: {surface_area_cone(10, 20) = }''') print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''') print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''') print(F'''Torus: {surface_area_torus(20, 10) = }''') print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''') print(F'''Square: {area_reg_polygon(4, 10) = }''') print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
717
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
690
0
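The second snippet in the record above is Project Euler 493 (seventy coloured balls, twenty drawn). By linearity of expectation, each of the 7 colours is entirely absent from the draw with probability C(60, 20) / C(70, 20), so the expected number of distinct colours seen is 7 * (1 - C(60, 20) / C(70, 20)). A worked check with readable names:

```python
import math

NUM_COLOURS = 7
BALLS_PER_COLOUR = 10
NUM_BALLS = NUM_COLOURS * BALLS_PER_COLOUR  # 70
DRAWN = 20

total = math.comb(NUM_BALLS, DRAWN)
# Ways to draw 20 balls that avoid one particular colour entirely.
missing_one_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, DRAWN)
expected = NUM_COLOURS * (1 - missing_one_colour / total)
print(f"{expected:.9f}")  # 6.818741802
```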
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = '''ssube/stable-diffusion-x4-upscaler-onnx''' def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str=0 ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__A ) ) __SCREAMING_SNAKE_CASE = torch.manual_seed(__A ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _a ( self : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**__A ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __SCREAMING_SNAKE_CASE = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**__A ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a ( self : Tuple ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**__A ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __SCREAMING_SNAKE_CASE = 
EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**__A ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a ( self : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**__A ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class A__( unittest.TestCase ): @property def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ort.SessionOptions() __SCREAMING_SNAKE_CASE = False return options def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __SCREAMING_SNAKE_CASE = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = '''A fantasy landscape, trending on artstation''' __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __SCREAMING_SNAKE_CASE = init_image.resize((1_28, 1_28) ) __SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__A , 
provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) __SCREAMING_SNAKE_CASE = '''A fantasy landscape, trending on artstation''' __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
718
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
690
0
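The zero-shot image classification row above boils its postprocess step down to a softmax over per-label image-text logits followed by a descending sort into {score, label} records. A minimal, framework-agnostic sketch of that step (the logits and candidate labels below are invented for illustration):

import numpy as np

def rank_candidates(logits, candidate_labels):
    # Numerically stable softmax over the image-text similarity logits.
    exp = np.exp(logits - logits.max())
    probs = exp / exp.sum()
    # Highest-scoring label first, mirroring the pipeline's postprocess.
    ranked = sorted(zip(probs.tolist(), candidate_labels), key=lambda pair: -pair[0])
    return [{"score": score, "label": label} for score, label in ranked]

print(rank_candidates(np.array([2.0, 0.5, -1.0]), ["cat", "dog", "car"]))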
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class A__( __snake_case , unittest.TestCase ): lowerCAmelCase = TransfoXLTokenizer lowerCAmelCase = False lowerCAmelCase = False def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE = [ """<unk>""", """[CLS]""", """[SEP]""", """want""", """unwanted""", """wa""", """un""", """running""", """,""", """low""", """l""", ] __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = """<unk> UNwanted , running""" __SCREAMING_SNAKE_CASE = """<unk> unwanted, running""" return input_text, output_text def _a ( self : Dict ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_lowercase ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(_lowercase , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [0, 4, 8, 7] ) def _a ( self : Tuple ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _a ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=_lowercase ) __SCREAMING_SNAKE_CASE = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?""" __SCREAMING_SNAKE_CASE = [ """Hello""", """(""", """bracket""", """)""", """and""", """side""", """@-@""", """scrolled""", """[""", """and""", """]""", """Henry""", """'s""", """$""", """5""", """@,@""", """000""", """with""", """3""", """@.@""", """34""", """m""", """.""", """What""", """'s""", """up""", """!""", """?""", ] self.assertListEqual(tokenizer.tokenize(_lowercase ) , _lowercase ) self.assertEqual(tokenizer.convert_tokens_to_string(_lowercase ) , _lowercase ) def _a ( self : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = len(_lowercase ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(_lowercase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
719
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
690
0
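The interpolation solver in the row above is plain Gaussian elimination: partial pivoting, forward elimination, then back substitution. A self-contained sketch of the same idea on a 2x2 system, fitting y = c0*x + c1 through (1, 3) and (2, 5), where the expected answer is c0 = 2, c1 = 1 (values chosen for illustration):

def solve_2x2(a, b):
    # Forward elimination: remove x0 from the second row.
    ratio = a[1][0] / a[0][0]
    a11 = a[1][1] - ratio * a[0][1]
    b1 = b[1] - ratio * b[0]
    # Back substitution: solve for the last unknown first.
    x1 = b1 / a11
    x0 = (b[0] - a[0][1] * x1) / a[0][0]
    return x0, x1

print(solve_2x2([[1.0, 1.0], [2.0, 1.0]], [3.0, 5.0]))  # -> (2.0, 1.0)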
"""simple docstring""" lowerCAmelCase__ ={ 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on lowerCAmelCase__ ={value: key for key, value in MORSE_CODE_DICT.items()} def _a ( UpperCAmelCase__ ) -> str: return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def _a ( UpperCAmelCase__ ) -> str: return "".join(REVERSE_DICT[char] for char in message.split() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = """Morse code here!""" print(lowercase_ ) __SCREAMING_SNAKE_CASE = encrypt(lowercase_ ) print(lowercase_ ) __SCREAMING_SNAKE_CASE = decrypt(lowercase_ ) print(lowercase_ ) if __name__ == "__main__": main()
720
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
690
0
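The Morse row above is a pure table lookup: characters map to dot-dash tokens joined by single spaces, with '/' standing in for the inter-word space. A tiny roundtrip sketch using a subset of the table (subset chosen purely for illustration):

# Encode and decode with a minimal slice of the Morse table.
MORSE = {"S": "...", "O": "---", " ": "/"}
REVERSE = {value: key for key, value in MORSE.items()}

def encode(message):
    return " ".join(MORSE[ch] for ch in message.upper())

def decode(code):
    return "".join(REVERSE[token] for token in code.split())

assert encode("SOS") == "... --- ..."
assert decode("... --- ...") == "SOS"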
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase__ =logging.get_logger(__name__) class A__( enum.Enum ): lowerCAmelCase = 0 lowerCAmelCase = 1 @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class A__( __SCREAMING_SNAKE_CASE ): lowerCAmelCase = 'generated' def __init__( self : int , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Any , ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" return True def _a ( self : Any , *__SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _a ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , _a ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( f""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*_a , padding=_a , truncation=_a , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = super().__call__(*_a , **_a ) if ( isinstance(args[0] , _a ) and all(isinstance(_a , _a ) for el in args[0] ) and all(len(_a ) == 1 for res in result ) ): return [res[0] for res in result] return result def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(_a , truncation=_a , **_a ) return inputs def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : str ) -> Tuple: """simple docstring""" if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs['''input_ids'''].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs['''input_ids'''] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get('''min_length''' , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_a , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) __SCREAMING_SNAKE_CASE = self.model.generate(**_a , **_a ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(_a , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(_a , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=ReturnType.TEXT , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {f"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { f"""{self.return_name}_text""": self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) } records.append(_a ) return records @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class A__( __SCREAMING_SNAKE_CASE ): lowerCAmelCase = 'summary' def __call__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" return super().__call__(*_a , **_a ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" if max_length < min_length: logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f"""consider decreasing max_length manually, e.g. 
summarizer(\'...\', max_length={input_length//2})""" ) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class A__( __SCREAMING_SNAKE_CASE ): lowerCAmelCase = 'translation' def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any ) -> List[Any]: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def _a ( self : Tuple , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=None ) -> List[Any]: """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _a ): return self.tokenizer._build_translation_inputs( *_a , return_tensors=self.framework , truncation=_a , src_lang=_a , tgt_lang=_a ) else: return super()._parse_and_tokenize(*_a , truncation=_a ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**_a ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get('''task''' , self.task ) __SCREAMING_SNAKE_CASE = task.split('''_''' ) if task and len(_a ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" return super().__call__(*_a , **_a )
721
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
690
0
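In the text2text row above, `generate` returns batch_size * num_return_sequences output rows, which the forward step folds back into a per-prompt axis via `reshape(in_b, out_b // in_b, ...)`. A small numpy sketch of that fold (all shapes are illustrative):

import numpy as np

in_b, seq_len = 2, 5                                     # two prompts
output_ids = np.arange(6 * seq_len).reshape(6, seq_len)  # three sequences per prompt
out_b = output_ids.shape[0]
folded = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
assert folded.shape == (2, 3, 5)  # (batch, num_return_sequences, seq_len)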
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""], """tokenization_m2m_100""": ["""M2M100Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""", """M2M100ForConditionalGeneration""", """M2M100Model""", """M2M100PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
700
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _a ( UpperCAmelCase__ ) -> dict[str, str]: __SCREAMING_SNAKE_CASE = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __SCREAMING_SNAKE_CASE = remove_duplicates(key.upper() ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # First fill cipher with key characters __SCREAMING_SNAKE_CASE = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 26 ): __SCREAMING_SNAKE_CASE = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __SCREAMING_SNAKE_CASE = alphabet[i - offset] __SCREAMING_SNAKE_CASE = char return cipher_alphabet def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = input('''Enter message to encode or decode: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Enter keyword: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __SCREAMING_SNAKE_CASE = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __SCREAMING_SNAKE_CASE = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
690
0
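The cipher row above builds a keyed substitution alphabet: key letters claim the front of the mapping and the remaining alphabet fills in behind them, with a guard against reusing key letters. A compact sketch of the classic dedupe construction (note: this is a simplified variant; the module above fills the remaining slots with an offset-based scan rather than a plain dedupe):

import string

def keyed_alphabet(key):
    # Key letters first (duplicates dropped), then the untouched remainder of A-Z.
    seen = []
    for ch in key.upper() + string.ascii_uppercase:
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    return dict(zip(string.ascii_uppercase, seen))

cipher = keyed_alphabet("secret")
print("".join(cipher[c] for c in "HELLO"))  # -> DTIIL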
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A__( unittest.TestCase ): def __init__( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=30 , __SCREAMING_SNAKE_CASE : Optional[int]=4_00 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[str]=1 / 2_55 , __SCREAMING_SNAKE_CASE : Tuple=True , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_pad def _a ( self : int ) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False ) -> Dict: """simple docstring""" if not batched: __SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: __SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * h / w ) __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] elif w > h: __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] __SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * w / h ) else: __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] __SCREAMING_SNAKE_CASE = self.size['''shortest_edge'''] else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0] __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__( snake_case_ , unittest.TestCase ): lowerCAmelCase = DetaImageProcessor if is_vision_available() else None def _a ( self : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self ) @property def _a ( self : str ) -> Tuple: """simple docstring""" 
return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_rescale''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" pass def _a ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : str ) -> List[Any]: """simple 
docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {'''image_id''': 3_97_69, '''annotations''': target} # encode them __SCREAMING_SNAKE_CASE = DetaImageProcessor() __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , 
__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} __SCREAMING_SNAKE_CASE = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __SCREAMING_SNAKE_CASE = DetaImageProcessor(format='''coco_panoptic''' ) __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) ) # verify masks __SCREAMING_SNAKE_CASE = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __SCREAMING_SNAKE_CASE ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
701
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__: def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[Any] ) -> Any: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _a ( self : Dict ) -> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]={} ): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = TFRegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def 
_a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : List[Any] ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
690
0
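The `get_expected_values` helper in the DETA test row above encodes the shortest-edge resize rule: scale the image so its shorter side equals `shortest_edge` while preserving aspect ratio. A standalone sketch (the default of 18 mirrors the test config; the sample size is illustrative):

def shortest_edge_resize(height, width, shortest_edge=18):
    # The shorter side becomes `shortest_edge`; the other side scales to match.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(400, 300))  # -> (24, 18)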
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowerCAmelCase__ =logging.getLogger() def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: __SCREAMING_SNAKE_CASE = "\n".join(SCREAMING_SNAKE_CASE_ ) Path(SCREAMING_SNAKE_CASE_ ).open('''w''' ).writelines(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ ="patrickvonplaten/t5-tiny-random" lowerCAmelCase__ ="sshleifer/bart-tiny-random" lowerCAmelCase__ ="sshleifer/tiny-mbart" lowerCAmelCase__ =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class A__( _UpperCAmelCase ): def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __SCREAMING_SNAKE_CASE = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __SCREAMING_SNAKE_CASE = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) __SCREAMING_SNAKE_CASE = "translation_en_to_de" if model == T5_TINY else "summarization" __SCREAMING_SNAKE_CASE = f"""\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n """.split() with patch.object(lowercase__ , '''argv''' , lowercase__ ): run_generate() assert Path(lowercase__ ).exists() # os.remove(Path(output_file_name)) def _a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" self.run_eval_tester(lowercase__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple: """simple docstring""" self.run_eval_tester(lowercase__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _a ( self : int , __SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __SCREAMING_SNAKE_CASE = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __SCREAMING_SNAKE_CASE = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } __SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir() ) __SCREAMING_SNAKE_CASE = str(tmp_dir / '''scores.json''' ) __SCREAMING_SNAKE_CASE = str(tmp_dir / '''val.target''' ) _dump_articles(lowercase__ , text['''en'''] ) _dump_articles(lowercase__ , text['''de'''] ) __SCREAMING_SNAKE_CASE = "translation_en_to_de" if model == T5_TINY else "summarization" __SCREAMING_SNAKE_CASE = f"""\n run_eval_search.py\n {model}\n {str(lowercase__ )}\n {str(lowercase__ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n """.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(lowercase__ , '''argv''' , lowercase__ ): with CaptureStdout() as cs: run_search() 
__SCREAMING_SNAKE_CASE = [" num_beams | length_penalty", model, "Best score args"] __SCREAMING_SNAKE_CASE = ["Info"] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(lowercase__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowercase__ ).exists() os.remove(Path(lowercase__ ) )
702
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = XLMRobertaTokenizer lowerCAmelCase = XLMRobertaTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<pad>''' __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_02 ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', 
'''<unk>''', '''.''', ] , ) def _a ( self : int ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) @cached_property def _a ( self : Union[str, Any] ) -> List[str]: """simple 
docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE ) pickle.loads(__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.''' __SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''Hello World!''' __SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __SCREAMING_SNAKE_CASE = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
690
0
"""simple docstring""" def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __SCREAMING_SNAKE_CASE = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" __SCREAMING_SNAKE_CASE = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" __SCREAMING_SNAKE_CASE = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None # sigma(t_i) @classmethod def _a ( cls : int ) -> List[Any]: """simple docstring""" return cls() @dataclass class A__( __A ): lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 class A__( __A , __A ): @property def _a ( self : Optional[Any] ) -> List[str]: """simple docstring""" return True @register_to_config def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] = 0.02 , __SCREAMING_SNAKE_CASE : List[str] = 1_00 , __SCREAMING_SNAKE_CASE : Optional[Any] = 1.0_07 , __SCREAMING_SNAKE_CASE : Tuple = 80 , __SCREAMING_SNAKE_CASE : List[Any] = 0.05 , __SCREAMING_SNAKE_CASE : Optional[int] = 50 , ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Tuple ) -> List[Any]: """simple docstring""" return KarrasVeSchedulerState.create() def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] = () ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = jnp.arange(0 , __SCREAMING_SNAKE_CASE )[::-1].copy() __SCREAMING_SNAKE_CASE = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__SCREAMING_SNAKE_CASE , schedule=jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) , timesteps=__SCREAMING_SNAKE_CASE , ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , ) -> List[Any]: """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: __SCREAMING_SNAKE_CASE = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: __SCREAMING_SNAKE_CASE = 0 # sample eps ~ N(0, S_noise^2 * I) __SCREAMING_SNAKE_CASE = random.split(__SCREAMING_SNAKE_CASE , num=1 ) __SCREAMING_SNAKE_CASE = self.config.s_noise * random.normal(key=__SCREAMING_SNAKE_CASE , shape=sample.shape ) __SCREAMING_SNAKE_CASE = sigma + gamma * sigma __SCREAMING_SNAKE_CASE = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] = True , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = sample_hat + sigma_hat * model_output __SCREAMING_SNAKE_CASE = (sample_hat - pred_original_sample) / sigma_hat __SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple = True , ) -> Tuple: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = sample_prev + sigma_prev * model_output __SCREAMING_SNAKE_CASE = (sample_prev - pred_original_sample) / sigma_prev __SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" raise NotImplementedError()
704
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) if "model" in sd.keys(): __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model'''] # pop unnecessary weights __SCREAMING_SNAKE_CASE = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __SCREAMING_SNAKE_CASE = sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __SCREAMING_SNAKE_CASE = sd[key] # We split QKV in separate Q,K,V __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) __SCREAMING_SNAKE_CASE = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(UpperCAmelCase__ , depth // 3 , dim=0 ) __SCREAMING_SNAKE_CASE = q __SCREAMING_SNAKE_CASE = k __SCREAMING_SNAKE_CASE = v del sd[key] return sd @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_checkpoint(UpperCAmelCase__ ) if config is not None: __SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = OPTConfig() __SCREAMING_SNAKE_CASE = OPTModel(UpperCAmelCase__ ).half().eval() model.load_state_dict(UpperCAmelCase__ ) # Check results Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") lowerCAmelCase__ =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
690
0
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class A__: lowerCAmelCase = 42 # [batch_size x 3] lowerCAmelCase = 42 # [batch_size x 3] lowerCAmelCase = 42 # [batch_size x 3] lowerCAmelCase = 42 # [batch_size x 3] lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def _a ( self : Tuple ) -> List[str]: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def _a ( self : List[str] ) -> List[str]: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def _a ( self : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width ) __SCREAMING_SNAKE_CASE = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE = self.shape __SCREAMING_SNAKE_CASE = int(np.prod(_A ) ) __SCREAMING_SNAKE_CASE = self.get_image_coords() __SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) __SCREAMING_SNAKE_CASE = self.get_camera_rays(_A ) __SCREAMING_SNAKE_CASE = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] __SCREAMING_SNAKE_CASE = coords.view(_A , -1 , 2 ) __SCREAMING_SNAKE_CASE = self.resolution() __SCREAMING_SNAKE_CASE = self.fov() __SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1 __SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 ) __SCREAMING_SNAKE_CASE = fracs.view(_A , -1 , 2 ) __SCREAMING_SNAKE_CASE = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) __SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=_A ) __SCREAMING_SNAKE_CASE = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
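import numpy as np

# A minimal NumPy sketch of the orbit used by create_pan_cameras above:
# twenty viewing directions sampled on a circle, each with a fixed downward
# tilt of -0.5 before normalization, and each camera placed at distance 4
# looking back at the scene center.
thetas = np.linspace(0, 2 * np.pi, num=20)
z_dirs = np.stack([np.sin(thetas), np.cos(thetas), np.full_like(thetas, -0.5)], axis=1)
z_dirs /= np.linalg.norm(z_dirs, axis=1, keepdims=True)
origins = -z_dirs * 4
# Every camera origin sits on a ring of radius 4 around the origin.
assert np.allclose(np.linalg.norm(origins, axis=1), 4.0)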
705
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__( __magic_name__ ): lowerCAmelCase = '''naver-clova-ix/donut-base-finetuned-docvqa''' lowerCAmelCase = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) lowerCAmelCase = '''document_qa''' lowerCAmelCase = AutoProcessor lowerCAmelCase = VisionEncoderDecoderModel lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''text'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : "Image" , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __SCREAMING_SNAKE_CASE = task_prompt.replace('''{user_input}''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE = self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__SCREAMING_SNAKE_CASE , ).sequences def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __SCREAMING_SNAKE_CASE = re.sub(r'''<.*?>''' , '''''' , __SCREAMING_SNAKE_CASE , count=1 ).strip() 
        # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
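# A minimal sketch of the prompt template used by encode above: the DocVQA
# task prompt embeds the user question between question tags before it is
# tokenized. The question text here is purely illustrative.
task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
question = "What is the invoice total?"
prompt = task_prompt.replace("{user_input}", question)
assert prompt == "<s_docvqa><s_question>What is the invoice total?</s_question><s_answer>"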
690
0
"""simple docstring""" from ....utils import logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __a ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[str]=20_48 ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = config.__dict__ __SCREAMING_SNAKE_CASE = modal_hidden_size if num_labels: __SCREAMING_SNAKE_CASE = num_labels
706
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__( unittest.TestCase ): @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _a ( self : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A__( unittest.TestCase ): def _a ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''google/ncsnpp-celebahq-256''' __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __SCREAMING_SNAKE_CASE = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
690
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class A__( __lowerCamelCase ): lowerCAmelCase = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.''' ) lowerCAmelCase = '''CIDAS/clipseg-rd64-refined''' lowerCAmelCase = '''image_segmenter''' lowerCAmelCase = CLIPSegForImageSegmentation lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''image'''] def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*a_ , **a_ ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=a_ , return_tensors='''pt''' ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): __SCREAMING_SNAKE_CASE = self.model(**a_ ).logits return logits def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = outputs.cpu().detach().numpy() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 return Image.fromarray((array * 2_55).astype(np.uinta ) )
707
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "spiece.model"} lowerCAmelCase__ ={ "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } lowerCAmelCase__ ={ "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __SCREAMING_SNAKE_CASE = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __SCREAMING_SNAKE_CASE = '''<|endoftext|>''' if eos_token is None else eos_token __SCREAMING_SNAKE_CASE = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token else: __SCREAMING_SNAKE_CASE = '''<pad>''' if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace 
normalization in input texts # fmt : off __SCREAMING_SNAKE_CASE = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __SCREAMING_SNAKE_CASE = re.compile( f"""[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" ) def __getstate__( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.sp_model ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub('''''' , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __SCREAMING_SNAKE_CASE = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __SCREAMING_SNAKE_CASE = unicodedata.normalize('''NFC''' , __SCREAMING_SNAKE_CASE ) return text def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" return out_string def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = '''''' __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Union[str, Any] ) -> Dict[str, int]: """simple docstring""" __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: """simple docstring""" return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __SCREAMING_SNAKE_CASE = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__SCREAMING_SNAKE_CASE ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=__SCREAMING_SNAKE_CASE )
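import re
import unicodedata

# A minimal sketch of the cleanup done by preprocess_text above: strip
# non-printing control characters, collapse unusual whitespace to plain
# spaces, then apply NFC Unicode normalization. The control-character codes
# mirror the regex built in __init__; the whitespace set here is an
# illustrative subset (the original set did not survive extraction intact).
non_printing_re = re.compile(
    "[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203])) + "]"
)
whitespaces = {"\u2009", "\u200a", "\u202f", "\u2028", "\u2029"}
text = "Hello\u2009world\u200b!"
text = non_printing_re.sub("", text)  # drops the zero-width space
text = "".join(ch if ch not in whitespaces else " " for ch in text)
text = unicodedata.normalize("NFC", text)
assert text == "Hello world!"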
690
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ ={ "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
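import importlib
import types

# A simplified analogue of the _LazyModule pattern used above: attribute
# access resolves the submodule on first use instead of at import time.
# This toy version only handles module-level names; the real class also
# manages __all__, TYPE_CHECKING, and error reporting.
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


# Demonstration against the standard library: "join" is only imported from
# os.path when it is first accessed.
lazy_os = LazyModule("os", {"path": ["join"]})
assert lazy_os.join("a", "b") == importlib.import_module("os.path").join("a", "b")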
708
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase__ ={"UserAgent": UserAgent().random} def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = script.contents[0] __SCREAMING_SNAKE_CASE = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A__: def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = f"""https://www.instagram.com/{username}/""" __SCREAMING_SNAKE_CASE = self.get_json() def _a ( self : List[Any] ) -> dict: """simple docstring""" __SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__SCREAMING_SNAKE_CASE ).text __SCREAMING_SNAKE_CASE = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Tuple ) -> str: """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def _a ( self : Tuple ) -> str: """simple docstring""" return self.user_data["username"] @property def _a ( self : List[Any] ) -> str: """simple docstring""" return self.user_data["full_name"] @property def _a ( self : Optional[Any] ) -> str: """simple docstring""" return self.user_data["biography"] @property def _a ( self : List[str] ) -> str: """simple docstring""" return self.user_data["business_email"] @property def _a ( self : Any ) -> str: """simple docstring""" return self.user_data["external_url"] @property def _a ( self : Any ) -> int: """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def _a ( self : Dict ) -> int: """simple docstring""" return self.user_data["edge_follow"]["count"] @property def _a ( self : str ) -> int: """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _a ( self : Union[str, Any] ) -> str: """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def _a ( self : Tuple ) -> bool: """simple docstring""" return self.user_data["is_verified"] @property def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.user_data["is_private"] def _a ( UpperCAmelCase__ = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __SCREAMING_SNAKE_CASE = InstagramUser(UpperCAmelCase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , UpperCAmelCase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
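import json

# A minimal sketch of the script-tag parsing in extract_user_profile above:
# the profile JSON is sliced out of the page script starting at the
# '{"config"' marker and dropping the trailing semicolon. The payload here
# is a made-up stand-in for Instagram's real page data.
script_contents = 'window._sharedData = {"config": {}, "entry_data": {"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};'
data = json.loads(script_contents[script_contents.find('{"config"') : -1])
assert data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"] == "github"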
690
0
"""simple docstring""" from datetime import datetime import requests def _a ( UpperCAmelCase__ ) -> bytes: __SCREAMING_SNAKE_CASE = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' __SCREAMING_SNAKE_CASE = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(_A ).content if __name__ == "__main__": lowerCAmelCase__ =input("Enter Video/IGTV url: ").strip() lowerCAmelCase__ =F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F'''Done. Video saved to disk as {file_name}.''')
709
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__( datasets.Metric ): def _a ( self : Any ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]="binary" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="warn" , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = recall_score( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , zero_division=__SCREAMING_SNAKE_CASE , ) return {"recall": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
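from sklearn.metrics import recall_score

# A minimal sketch of the computation the metric above delegates to: with
# three positives of which two are predicted correctly, recall is 2/3,
# matching Example 1 in the docstring.
references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
assert abs(recall_score(references, predictions) - 2 / 3) < 1e-9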
690
0
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() lowerCAmelCase__ =2 class A__: def __init__( self : int , *, # begin keyword-only arguments __SCREAMING_SNAKE_CASE : Union[str, Any]="<s>" , __SCREAMING_SNAKE_CASE : Tuple="<pad>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : int="<unk>" , __SCREAMING_SNAKE_CASE : int=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = bos, unk, pad, eos __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = self.add_symbol(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = self.add_symbol(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = self.add_symbol(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = self.add_symbol(UpperCamelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = len(self.symbols ) def __eq__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self.indices == other.indices def __getitem__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Dict: """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Optional[Any] ) -> List[Any]: """simple docstring""" return len(self.symbols ) def __contains__( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" return sym in self.indices @classmethod def _a ( cls : str , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = cls() d.add_from_file(UpperCamelCase__ ) return d def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : Any=False ) -> Union[str, Any]: """simple docstring""" if word in self.indices and not overwrite: __SCREAMING_SNAKE_CASE = self.indices[word] __SCREAMING_SNAKE_CASE = self.count[idx] + n return idx else: __SCREAMING_SNAKE_CASE = len(self.symbols ) __SCREAMING_SNAKE_CASE = idx self.symbols.append(UpperCamelCase__ ) self.count.append(UpperCamelCase__ ) return idx def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[str]: """simple docstring""" return 0 def _a ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]: """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ): try: with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCamelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(UpperCamelCase__ ) ) return __SCREAMING_SNAKE_CASE = f.readlines() __SCREAMING_SNAKE_CASE = self._load_meta(UpperCamelCase__ ) for line in lines[indices_start_line:]: try: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = line.rsplit(''' ''' , 1 ) else: __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = int(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE 
= line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(UpperCamelCase__ ) ) self.add_symbol(UpperCamelCase__ , n=UpperCamelCase__ , overwrite=UpperCamelCase__ ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def _a ( UpperCAmelCase__ ) -> List[str]: __SCREAMING_SNAKE_CASE = dict((re.sub(r'''@@$''' , '''''' , A_ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , A_ ), v) for k, v in d.items() ) __SCREAMING_SNAKE_CASE = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] __SCREAMING_SNAKE_CASE = d[k] # restore return da def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]: if not os.path.exists(A_ ): raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(A_ , exist_ok=A_ ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models __SCREAMING_SNAKE_CASE = os.path.join(A_ , '''checkpoint.pt''' ) if not os.path.isfile(A_ ): raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" ) __SCREAMING_SNAKE_CASE = torch.load(A_ , map_location='''cpu''' ) __SCREAMING_SNAKE_CASE = chkpt['''cfg''']['''model'''] # dicts __SCREAMING_SNAKE_CASE = os.path.join(A_ , '''dict.txt''' ) if not os.path.isfile(A_ ): raise ValueError(f"""path to the file {dict_file} does not exist!""" ) __SCREAMING_SNAKE_CASE = Dictionary.load(A_ ) __SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices ) __SCREAMING_SNAKE_CASE = len(A_ ) __SCREAMING_SNAKE_CASE = os.path.join(A_ , VOCAB_FILES_NAMES['''vocab_file'''] ) print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(A_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) ) # merges_file (bpecodes) __SCREAMING_SNAKE_CASE = os.path.join(A_ , '''bpecodes''' ) if not os.path.isfile(A_ ): raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" ) __SCREAMING_SNAKE_CASE = os.path.join(A_ , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(A_ , A_ ) # model config __SCREAMING_SNAKE_CASE = os.path.join(A_ , '''config.json''' ) __SCREAMING_SNAKE_CASE = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.02, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1E-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(f"""Generating {biogpt_model_config_file}""" ) 
with open(A_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) ) # tokenizer config __SCREAMING_SNAKE_CASE = os.path.join(A_ , A_ ) __SCREAMING_SNAKE_CASE = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 10_24, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(f"""Generating {biogpt_tokenizer_config_file}""" ) with open(A_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) ) # model __SCREAMING_SNAKE_CASE = chkpt['''model'''] # remove unneeded keys __SCREAMING_SNAKE_CASE = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(A_ , A_ ) __SCREAMING_SNAKE_CASE = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): __SCREAMING_SNAKE_CASE = model_state_dict.pop(A_ ) else: __SCREAMING_SNAKE_CASE = model_state_dict.pop(A_ ) __SCREAMING_SNAKE_CASE = BioGptConfig.from_pretrained(A_ ) __SCREAMING_SNAKE_CASE = BioGptForCausalLM(A_ ) # check that it loads ok model_new.load_state_dict(A_ ) # save __SCREAMING_SNAKE_CASE = os.path.join(A_ , A_ ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(A_ , A_ ) print('''Conversion is done!''' ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--biogpt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase__ =parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
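# Hypothetical invocation sketch (mine; the paths are placeholders, not real
# checkpoints). The dump dir must contain checkpoint.pt, dict.txt and bpecodes,
# as the checks at the top of convert_biogpt_checkpoint_to_pytorch enforce:
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path /path/to/output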
710
"""simple docstring""" def _a ( UpperCAmelCase__ = 10**9 ) -> int: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A__( snake_case__ , unittest.TestCase ): lowerCAmelCase = ProphetNetTokenizer lowerCAmelCase = False def _a ( self : str ) -> List[Any]: """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running' __SCREAMING_SNAKE_CASE = 'unwanted, running' return input_text, output_text def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def _a ( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def _a ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def _a ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _a ( self : int ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _a ( self : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __SCREAMING_SNAKE_CASE = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __SCREAMING_SNAKE_CASE = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02] __SCREAMING_SNAKE_CASE = tokenizer(_A , padding=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) __SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def _a ( self : int ) -> Tuple: """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def _a ( self : str ) -> str: """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def _a ( self : Tuple ) -> Any: """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __SCREAMING_SNAKE_CASE = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE = tokenizer.encode('''multi-sequence build''' , 
add_special_tokens=_A ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == text + [1_02] assert encoded_pair == text + [1_02] + text_a + [1_02]
711
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase__ =pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase__ =dataset.iloc[:, 1:2].values lowerCAmelCase__ =dataset.iloc[:, 2].values lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase__ =PolynomialFeatures(degree=4) lowerCAmelCase__ =poly_reg.fit_transform(X) lowerCAmelCase__ =LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> List[Any]: plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' ) plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
690
0
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"tokenizer_file": "tokenizer.json"} lowerCAmelCase__ ={ "tokenizer_file": { "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json", "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json", }, } class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = ['''input_ids''', '''attention_mask'''] lowerCAmelCase = None def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[Any]: """simple docstring""" super().__init__( __a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , add_prefix_space=__a , clean_up_tokenization_spaces=__a , **__a , ) __SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __a ) != add_prefix_space: __SCREAMING_SNAKE_CASE = getattr(__a , pre_tok_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE = add_prefix_space __SCREAMING_SNAKE_CASE = pre_tok_class(**__a ) __SCREAMING_SNAKE_CASE = add_prefix_space def _a ( self : Any , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> BatchEncoding: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.get('''is_split_into_words''' , __a ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" ''' pretokenized inputs.''' ) return super()._batch_encode_plus(*__a , **__a ) def _a ( self : int , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple ) -> BatchEncoding: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.get('''is_split_into_words''' , __a ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" ''' pretokenized inputs.''' ) return super()._encode_plus(*__a , **__a ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] = None ) -> Tuple[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = 
self._tokenizer.model.save(__a , name=__a ) return tuple(__a ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] ) if len(__a ) > self.model_max_length: __SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] return input_ids
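# Usage sketch (mine; downloads tokenizer files from the Hub, so it needs network
# access): the guard in _batch_encode_plus/_encode_plus above means pretokenized
# input is only accepted when the tokenizer was built with add_prefix_space=True.
tokenizer = A__.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
ids = tokenizer(["Hello", "world"], is_split_into_words=True)["input_ids"]
print(ids)  # one sequence built from the pre-split words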
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = '''gelu''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : List[str]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() __SCREAMING_SNAKE_CASE = 
inputs_dict['''input_ids'''] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''attention_mask'''][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''head_mask'''] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Optional[int]: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] lowerCAmelCase = '''facebook/mbart-large-en-ro''' @cached_property def _a ( self : Optional[int] ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self._assert_generated_batch_equal_expected()
690
0
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__( unittest.TestCase ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : List[Any]=4_00 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=True , ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 18, """width""": 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__( lowercase_ , unittest.TestCase ): lowerCAmelCase = ImageGPTImageProcessor if is_vision_available() else None def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ImageGPTImageProcessingTester(self ) @property def _a ( self : List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , '''clusters''' ) ) self.assertTrue(hasattr(lowerCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(lowerCamelCase_ , '''do_normalize''' ) ) def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) __SCREAMING_SNAKE_CASE = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowerCamelCase_ ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = 
self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase_ , '''image_processor.json''' ) image_processor_first.to_json_file(lowerCamelCase_ ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_json_file(lowerCamelCase_ ).to_dict() __SCREAMING_SNAKE_CASE = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCamelCase_ ) def _a ( self : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCamelCase_ ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_pretrained(lowerCamelCase_ ).to_dict() __SCREAMING_SNAKE_CASE = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCamelCase_ ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass def _a ( ) -> List[Any]: __SCREAMING_SNAKE_CASE = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) __SCREAMING_SNAKE_CASE = Image.open(dataset[4]['''file'''] ) __SCREAMING_SNAKE_CASE = Image.open(dataset[5]['''file'''] ) __SCREAMING_SNAKE_CASE = [imagea, imagea] return images @require_vision @require_torch class A__( unittest.TestCase ): @slow def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) __SCREAMING_SNAKE_CASE = prepare_images() # test non-batched __SCREAMING_SNAKE_CASE = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) __SCREAMING_SNAKE_CASE = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase_ ) # test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase_ , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) __SCREAMING_SNAKE_CASE = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase_ )
713
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class A__( __magic_name__ ): lowerCAmelCase = '''van''' def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=2_24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : str=[64, 1_28, 3_20, 5_12] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE : Dict=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : Any=1E-2 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_sizes __SCREAMING_SNAKE_CASE = strides __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = mlp_ratios __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = layer_scale_init_value __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = dropout_rate
690
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class A__( unittest.TestCase ): lowerCAmelCase = StableDiffusionLDMaDPipeline lowerCAmelCase = TEXT_TO_IMAGE_PARAMS lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int]=0 ) -> Optional[int]: """simple docstring""" if str(UpperCamelCase_ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCamelCase_ ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) 
assert depth.shape == (1, 64, 64) __SCREAMING_SNAKE_CASE = np.array( [0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] ) __SCREAMING_SNAKE_CASE = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = 3 * [inputs['''prompt''']] # forward __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = rgb_slice_a[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = depth_slice_a[0, -3:, -1] __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = 3 * [inputs.pop('''prompt''' )] __SCREAMING_SNAKE_CASE = ldmad_pipe.tokenizer( UpperCamelCase_ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , ) __SCREAMING_SNAKE_CASE = text_inputs['''input_ids'''].to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe.text_encoder(UpperCamelCase_ )[0] __SCREAMING_SNAKE_CASE = prompt_embeds # forward __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = rgb_slice_a[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = '''french fries''' __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) __SCREAMING_SNAKE_CASE = np.array( [0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] ) __SCREAMING_SNAKE_CASE = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : Tuple ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , 
__SCREAMING_SNAKE_CASE : Union[str, Any]="cpu" , __SCREAMING_SNAKE_CASE : int=torch.floataa , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) ) __SCREAMING_SNAKE_CASE = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _a ( self : Optional[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ) __SCREAMING_SNAKE_CASE = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.get_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1].flatten() __SCREAMING_SNAKE_CASE = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12) __SCREAMING_SNAKE_CASE = np.array( [0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] ) __SCREAMING_SNAKE_CASE = np.array( [0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3 @nightly @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : str ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int="cpu" , __SCREAMING_SNAKE_CASE : List[Any]=torch.floataa , __SCREAMING_SNAKE_CASE : Optional[int]=0 ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) ) __SCREAMING_SNAKE_CASE = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _a ( self : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.get_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = 0.49_55_86 __SCREAMING_SNAKE_CASE = 0.33_79_55_15 __SCREAMING_SNAKE_CASE = 1_12.4_85_18 __SCREAMING_SNAKE_CASE = 98.48_97_46 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert 
np.abs(expected_depth_std - depth.std() ) < 1E-3 def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.get_inputs(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ldmad_pipe(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.rgb, output.depth __SCREAMING_SNAKE_CASE = 0.4_19_41_27 __SCREAMING_SNAKE_CASE = 0.35_37_55_86 __SCREAMING_SNAKE_CASE = 0.5_63_85_02 __SCREAMING_SNAKE_CASE = 0.34_68_61_03 assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3
714
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ ={"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> Dict: if not nums: # Makes sure that the list is not empty raise ValueError('''List is empty''' ) __SCREAMING_SNAKE_CASE = sum(lowerCamelCase__ ) / len(lowerCamelCase__ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowerCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__( __magic_name__ ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Distribution , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict=0 ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = 1.0 if scale is None else scale __SCREAMING_SNAKE_CASE = 0.0 if loc is None else loc super().__init__(lowercase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowercase_ )] ) @property def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def _a ( self : int ) -> Tuple: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def _a ( self : str ) -> List[Any]: """simple docstring""" return self.variance.sqrt() class A__( nn.Module ): def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Callable[..., Tuple[torch.Tensor]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> None: """simple docstring""" super().__init__(**lowercase_ ) __SCREAMING_SNAKE_CASE = args_dim __SCREAMING_SNAKE_CASE = nn.ModuleList([nn.Linear(lowercase_ , lowercase_ ) for dim in args_dim.values()] ) __SCREAMING_SNAKE_CASE = domain_map def _a ( self : Dict , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> Tuple[torch.Tensor]: """simple docstring""" __SCREAMING_SNAKE_CASE = [proj(lowercase_ ) for proj in self.proj] return self.domain_map(*lowercase_ ) class A__( nn.Module ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE = function def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , *__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" return self.function(lowercase_ , *lowercase_ ) class A__: lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int = 1 ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = dim __SCREAMING_SNAKE_CASE = {k: dim * self.args_dim[k] for k in self.args_dim} def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> str: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowercase_ ) else: return Independent(self.distribution_class(*lowercase_ ) , 1 ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , ) -> Distribution: """simple docstring""" __SCREAMING_SNAKE_CASE = self._base_distribution(lowercase_ ) if loc is None and scale is None: return distr else: return AffineTransformed(lowercase_ , loc=lowercase_ , scale=lowercase_ , event_dim=self.event_dim ) @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def _a ( self : Optional[int] ) -> int: """simple docstring""" return len(self.event_shape ) @property def _a ( self : Union[str, Any] ) -> float: """simple docstring""" return 0.0 def _a ( self : int , __SCREAMING_SNAKE_CASE : int ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowercase_ , args_dim=self.args_dim , 
domain_map=LambdaLayer(self.domain_map ) , ) def _a ( self : Optional[int] , *__SCREAMING_SNAKE_CASE : torch.Tensor ) -> Dict: """simple docstring""" raise NotImplementedError() @staticmethod def _a ( __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor: """simple docstring""" return (x + torch.sqrt(torch.square(lowercase_ ) + 4.0 )) / 2.0 class A__( __magic_name__ ): lowerCAmelCase = {'''df''': 1, '''loc''': 1, '''scale''': 1} lowerCAmelCase = StudentT @classmethod def _a ( cls : str , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps ) __SCREAMING_SNAKE_CASE = 2.0 + cls.squareplus(lowercase_ ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__( __magic_name__ ): lowerCAmelCase = {'''loc''': 1, '''scale''': 1} lowerCAmelCase = Normal @classmethod def _a ( cls : Union[str, Any] , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__( __magic_name__ ): lowerCAmelCase = {'''total_count''': 1, '''logits''': 1} lowerCAmelCase = NegativeBinomial @classmethod def _a ( cls : Tuple , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = cls.squareplus(lowercase_ ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple ) -> Distribution: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = distr_args if self.dim == 1: return self.distribution_class(total_count=lowercase_ , logits=lowercase_ ) else: return Independent(self.distribution_class(total_count=lowercase_ , logits=lowercase_ ) , 1 ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None ) -> Distribution: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
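# Minimal usage sketch (mine, continuing from the module above; the sizes are
# arbitrary): project features to Student-T parameters, build a distribution,
# then sample and score it.
features = torch.randn(8, 16)                  # (batch, in_features)
output = StudentTOutput(dim=1)
projection = output.get_parameter_projection(16)
distr_args = projection(features)              # (df, loc, scale), each of shape (8,)
distribution = output.distribution(distr_args)
sample = distribution.sample()                 # shape (8,)
log_prob = distribution.log_prob(sample)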
716
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A__( unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = CLIPConfig() # Create a dummy config file with image_proceesor_type __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict() config_dict.pop('''image_processor_type''' ) __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # make sure private variable is not incorrectly saved __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) 
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> str: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def _a ( self : Dict ) -> Dict: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _a ( self : int ) -> Any: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _a ( self : int ) -> List[Any]: """simple docstring""" class A__( __magic_name__ ): lowerCAmelCase = True try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
690
0
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
717
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
690
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = CycleDiffusionPipeline lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'} lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _a ( self : Dict ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = image / 2 + 0.5 if str(UpperCAmelCase__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def _a ( self : str ) -> 
Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def _a ( self : str ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_dummy_components() for name, module in components.items(): if hasattr(UpperCAmelCase__ , '''half''' ): __SCREAMING_SNAKE_CASE = module.half() __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def _a ( self : Optional[Any] ) -> Dict: """simple docstring""" return super().test_inference_batch_single_identical() @skip_mps def _a ( self : Dict ) -> List[Any]: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def _a ( self : int ) -> Union[str, Any]: """simple docstring""" return super().test_save_load_optional_components() @skip_mps def _a ( self : int ) -> Any: """simple docstring""" return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : int ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) __SCREAMING_SNAKE_CASE = init_image.resize((5_12, 5_12) ) __SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-4''' __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained( UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE = '''A black colored car''' 
__SCREAMING_SNAKE_CASE = '''A blue colored car''' __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=UpperCAmelCase__ , source_prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase__ , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) __SCREAMING_SNAKE_CASE = init_image.resize((5_12, 5_12) ) __SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-4''' __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE = '''A black colored car''' __SCREAMING_SNAKE_CASE = '''A blue colored car''' __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=UpperCAmelCase__ , source_prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase__ , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images assert np.abs(image - expected_image ).max() < 2E-2
718
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
690
0
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE__ ) class A__( SCREAMING_SNAKE_CASE__ ): def __init__( self : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" super().__init__(**snake_case__ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type(snake_case__ ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> List[Any]: """simple docstring""" if "text_queries" in kwargs: __SCREAMING_SNAKE_CASE = kwargs.pop('''text_queries''' ) if isinstance(snake_case__ , (str, Image.Image) ): __SCREAMING_SNAKE_CASE = {"image": image, "candidate_labels": candidate_labels} else: __SCREAMING_SNAKE_CASE = image __SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ ) return results def _a ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "threshold" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["threshold"] if "top_k" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["top_k"] return {}, {}, postprocess_params def _a ( self : str , __SCREAMING_SNAKE_CASE : str ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(inputs['''image'''] ) __SCREAMING_SNAKE_CASE = inputs["candidate_labels"] if isinstance(snake_case__ , snake_case__ ): __SCREAMING_SNAKE_CASE = candidate_labels.split(''',''' ) __SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(snake_case__ ): __SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework ) yield { "is_last": i == len(snake_case__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''target_size''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_label''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''is_last''' ) __SCREAMING_SNAKE_CASE = self.model(**snake_case__ ) __SCREAMING_SNAKE_CASE = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : str=None ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for model_output in model_outputs: __SCREAMING_SNAKE_CASE = model_output["candidate_label"] __SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ ) __SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection( outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['''target_size'''] )[0] for 
index in outputs["scores"].nonzero(): __SCREAMING_SNAKE_CASE = outputs["scores"][index].item() __SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['''boxes'''][index][0] ) __SCREAMING_SNAKE_CASE = {"score": score, "label": label, "box": box} results.append(snake_case__ ) __SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda __SCREAMING_SNAKE_CASE : x["score"] , reverse=snake_case__ ) if top_k: __SCREAMING_SNAKE_CASE = results[:top_k] return results def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) __SCREAMING_SNAKE_CASE = box.int().tolist() __SCREAMING_SNAKE_CASE = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
719
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A__( __lowerCamelCase ): lowerCAmelCase = (DDPMParallelScheduler,) def _a ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**SCREAMING_SNAKE_CASE_ ) return config def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ ) def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ ) def _a ( self : Union[str, Any] ) -> int: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_ ) def _a ( self : Tuple ) -> Dict: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ ) def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , ) def _a ( self : Tuple ) -> int: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ ) def _a ( self : str ) -> Optional[int]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ ) def _a ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5 def _a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1 __SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1 __SCREAMING_SNAKE_CASE = samplea.shape[0] __SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0 ) __SCREAMING_SNAKE_CASE = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , 
timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2 assert abs(result_mean.item() - 0.50_05 ) < 1E-3 def _a ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(SCREAMING_SNAKE_CASE_ ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 2. predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2 assert abs(result_mean.item() - 0.33_72 ) < 1E-3 def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type='''v_prediction''' ) __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(SCREAMING_SNAKE_CASE_ ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 2. 
predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2 assert abs(result_mean.item() - 0.26_31 ) < 1E-3 def _a ( self : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = scheduler.timesteps for i, timestep in enumerate(SCREAMING_SNAKE_CASE_ ): if i == len(SCREAMING_SNAKE_CASE_ ) - 1: __SCREAMING_SNAKE_CASE = -1 else: __SCREAMING_SNAKE_CASE = timesteps[i + 1] __SCREAMING_SNAKE_CASE = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = prev_t.item() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = [1_00, 87, 50, 51, 0] with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ ) def _a ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = [1_00, 87, 50, 1, 0] __SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**SCREAMING_SNAKE_CASE_ ) __SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
720
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
690
0
"""simple docstring""" lowerCAmelCase__ ={ """Pillow""": """Pillow""", """accelerate""": """accelerate>=0.11.0""", """compel""": """compel==0.1.8""", """black""": """black~=23.1""", """datasets""": """datasets""", """filelock""": """filelock""", """flax""": """flax>=0.4.1""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.13.2""", """requests-mock""": """requests-mock==1.10.0""", """importlib_metadata""": """importlib_metadata""", """invisible-watermark""": """invisible-watermark""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2""", """jaxlib""": """jaxlib>=0.1.65""", """Jinja2""": """Jinja2""", """k-diffusion""": """k-diffusion>=0.0.12""", """torchsde""": """torchsde""", """note_seq""": """note_seq""", """librosa""": """librosa""", """numpy""": """numpy""", """omegaconf""": """omegaconf""", """parameterized""": """parameterized""", """protobuf""": """protobuf>=3.20.3,<4""", """pytest""": """pytest""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """ruff""": """ruff>=0.0.241""", """safetensors""": """safetensors""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """scipy""": """scipy""", """onnx""": """onnx""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """tensorboard""": """tensorboard""", """torch""": """torch>=1.4""", """torchvision""": """torchvision""", """transformers""": """transformers>=4.25.1""", """urllib3""": """urllib3<=2.0.0""", }
721
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
690
0
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class A__( unittest.TestCase ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : str=10 , __SCREAMING_SNAKE_CASE : Dict=0.02 , ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE = num_patches + 1 def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, pixel_values def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxViTModel(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) __SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.type_sequence_label_size __SCREAMING_SNAKE_CASE = 
FlaxViTForImageClassification(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( __SCREAMING_SNAKE_CASE ) = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class A__( lowercase__ , unittest.TestCase ): lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _a ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxViTModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def _a ( self : Dict ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _a ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def _a ( self : str ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict ): return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ ) with self.subTest('''JIT Enabled''' ): __SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _a ( self : Tuple ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('''google/vit-base-patch16-224''' ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(UpperCAmelCase__ )
700
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _a ( UpperCAmelCase__ ) -> dict[str, str]: __SCREAMING_SNAKE_CASE = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __SCREAMING_SNAKE_CASE = remove_duplicates(key.upper() ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # First fill cipher with key characters __SCREAMING_SNAKE_CASE = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 26 ): __SCREAMING_SNAKE_CASE = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __SCREAMING_SNAKE_CASE = alphabet[i - offset] __SCREAMING_SNAKE_CASE = char return cipher_alphabet def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = input('''Enter message to encode or decode: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Enter keyword: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __SCREAMING_SNAKE_CASE = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __SCREAMING_SNAKE_CASE = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
690
0
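# Editor's illustrative sketch (not a dataset row): the style_context above
# implements a keyword substitution cipher, but its identifiers were mangled, so it
# no longer runs as written. This is a minimal runnable sketch of the same idea;
# the function and variable names here are my own, not the dataset's.
import string


def make_cipher_map(keyword: str) -> dict[str, str]:
    # Deduplicate the keyword, then append the unused letters in alphabetical order.
    seen: list[str] = []
    for ch in keyword.upper():
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    rest = [c for c in string.ascii_uppercase if c not in seen]
    return dict(zip(string.ascii_uppercase, seen + rest))


cipher_map = make_cipher_map("lemon")
print("".join(cipher_map.get(c, c) for c in "ATTACK AT DAWN"))  # LTTLMG LT OLWJ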
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase__ ="hf-internal-testing/tiny-random-bert" lowerCAmelCase__ =os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") lowerCAmelCase__ ="9b8c223d42b2188cb49d29af482996f9d0f3e5a6" class A__( unittest.TestCase ): def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , _lowerCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_lowerCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_lowerCamelCase , _lowerCamelCase ) ) ) with open(os.path.join(_lowerCamelCase , '''refs''' , '''main''' ) ) as f: __SCREAMING_SNAKE_CASE = f.read() self.assertEqual(_lowerCamelCase , os.path.join(_lowerCamelCase , '''snapshots''' , _lowerCamelCase , _lowerCamelCase ) ) self.assertTrue(os.path.isfile(_lowerCamelCase ) ) # File is cached at the same place the second time. __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) # Using a specific revision to test the full commit hash. __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , _lowerCamelCase , revision='''9b8c223''' ) self.assertEqual(_lowerCamelCase , os.path.join(_lowerCamelCase , '''snapshots''' , _lowerCamelCase , _lowerCamelCase ) ) def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" with self.assertRaisesRegex(_lowerCamelCase , '''is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = cached_file('''tiny-random-bert''' , _lowerCamelCase ) with self.assertRaisesRegex(_lowerCamelCase , '''is not a valid git identifier''' ): __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , _lowerCamelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_lowerCamelCase , '''does not appear to have a file named''' ): __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , '''conf''' ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" with self.assertRaisesRegex(_lowerCamelCase , '''does not appear to have a file named''' ): __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , '''conf''' ) with open(os.path.join(_lowerCamelCase , '''refs''' , '''main''' ) ) as f: __SCREAMING_SNAKE_CASE = f.read() self.assertTrue(os.path.isfile(os.path.join(_lowerCamelCase , '''.no_exist''' , _lowerCamelCase , '''conf''' ) ) ) __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=_lowerCamelCase ) self.assertIsNone(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , '''conf''' , local_files_only=_lowerCamelCase , _raise_exceptions_for_missing_entries=_lowerCamelCase ) self.assertIsNone(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = mock.Mock() __SCREAMING_SNAKE_CASE = 5_00 __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = HTTPError __SCREAMING_SNAKE_CASE = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: __SCREAMING_SNAKE_CASE = cached_file(_lowerCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=_lowerCamelCase ) self.assertIsNone(_lowerCamelCase ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _lowerCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _lowerCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _lowerCamelCase ) ) def _a ( self : Tuple ) -> List[Any]: """simple docstring""" self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_lowerCamelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _lowerCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_lowerCamelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _lowerCamelCase , revision='''ahaha''' ) __SCREAMING_SNAKE_CASE = get_file_from_repo('''bert-base-cased''' , _lowerCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. __SCREAMING_SNAKE_CASE = json.loads(open(_lowerCamelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE = Path(_lowerCamelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_lowerCamelCase , '''a.txt''' ) , str(_lowerCamelCase ) ) self.assertIsNone(get_file_from_repo(_lowerCamelCase , '''b.txt''' ) )
701
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__: def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[Any] ) -> Any: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _a ( self : Dict ) -> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]={} ): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = TFRegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def 
_a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : List[Any] ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
690
0
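# Editor's illustrative sketch (not a dataset row): the test file in the row above
# exercises the Hugging Face hub cache through `cached_file`. Assuming a recent
# `transformers` install and network access, resolving a repo file into the local
# cache looks like this; the returned path lands inside the cache's snapshots folder.
from transformers.utils import cached_file

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)  # .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json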
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 1 , UpperCAmelCase__ = 1 , UpperCAmelCase__ = 1.0E4 , UpperCAmelCase__ = False , UpperCAmelCase__ = 1.0 , ) -> List[str]: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __SCREAMING_SNAKE_CASE = float(embedding_dim // 2 ) __SCREAMING_SNAKE_CASE = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __SCREAMING_SNAKE_CASE = min_timescale * jnp.exp(jnp.arange(lowerCamelCase_ , dtype=jnp.floataa ) * -log_timescale_increment ) __SCREAMING_SNAKE_CASE = jnp.expand_dims(lowerCamelCase_ , 1 ) * jnp.expand_dims(lowerCamelCase_ , 0 ) # scale embeddings __SCREAMING_SNAKE_CASE = scale * emb if flip_sin_to_cos: __SCREAMING_SNAKE_CASE = jnp.concatenate([jnp.cos(lowerCamelCase_ ), jnp.sin(lowerCamelCase_ )] , axis=1 ) else: __SCREAMING_SNAKE_CASE = jnp.concatenate([jnp.sin(lowerCamelCase_ ), jnp.cos(lowerCamelCase_ )] , axis=1 ) __SCREAMING_SNAKE_CASE = jnp.reshape(lowerCamelCase_ , [jnp.shape(lowerCamelCase_ )[0], embedding_dim] ) return signal class A__( nn.Module ): lowerCAmelCase = 32 lowerCAmelCase = jnp.floataa @nn.compact def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(__lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.silu(__lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(__lowerCamelCase ) return temb class A__( nn.Module ): lowerCAmelCase = 32 lowerCAmelCase = False lowerCAmelCase = 1 @nn.compact def __call__( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" return get_sinusoidal_embeddings( __lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
702
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = XLMRobertaTokenizer lowerCAmelCase = XLMRobertaTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<pad>''' __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_02 ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', 
'''<unk>''', '''.''', ] , ) def _a ( self : int ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) @cached_property def _a ( self : Union[str, Any] ) -> List[str]: """simple 
docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE ) pickle.loads(__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.''' __SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''Hello World!''' __SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __SCREAMING_SNAKE_CASE = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
690
0
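# Editor's illustrative sketch (not a dataset row): the code row above computes
# sinusoidal timestep embeddings in JAX, but its mangled names no longer resolve.
# Below is the same computation re-expressed with NumPy so it runs stand-alone;
# all names are my own, and the defaults mirror the snippet's signature.
import numpy as np


def sinusoidal_embeddings(timesteps, embedding_dim, min_timescale=1.0, max_timescale=1.0e4, freq_shift=1.0):
    assert timesteps.ndim == 1 and embedding_dim % 2 == 0
    num_timescales = embedding_dim // 2
    log_increment = np.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
    emb = timesteps[:, None].astype(np.float32) * inv_timescales[None, :]
    return np.concatenate([np.sin(emb), np.cos(emb)], axis=1)


print(sinusoidal_embeddings(np.arange(4), 8).shape)  # (4, 8)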
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ ={ "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
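# Editor's illustrative sketch (not a dataset row): the Coulomb's law snippet above
# solves F = k * |q1 * q2| / d**2 for whichever argument is zero. A direct forward
# evaluation with one worked number makes the relation easy to sanity-check.
COULOMBS_CONSTANT = 8.988e9  # N * m^2 * C^-2


def coulomb_force(chargea: float, chargeb: float, distance: float) -> float:
    return COULOMBS_CONSTANT * abs(chargea * chargeb) / distance**2


# Two 1 uC charges 0.1 m apart: 8.988e9 * 1e-12 / 1e-2 ~= 0.8988 N
print(coulomb_force(1e-6, 1e-6, 0.1))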
"""simple docstring""" import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ="▁" lowerCAmelCase__ ={"vocab_file": "prophetnet.tokenizer"} lowerCAmelCase__ ={ "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } lowerCAmelCase__ ={ "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } lowerCAmelCase__ ={ "microsoft/xprophetnet-large-wiki100-cased": 512, } def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = collections.OrderedDict() with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as reader: __SCREAMING_SNAKE_CASE = reader.readlines() for index, token in enumerate(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = token.rstrip('''\n''' ) __SCREAMING_SNAKE_CASE = index return vocab class A__( snake_case__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]="[SEP]" , __SCREAMING_SNAKE_CASE : str="[SEP]" , __SCREAMING_SNAKE_CASE : Any="[SEP]" , __SCREAMING_SNAKE_CASE : Optional[Any]="[UNK]" , __SCREAMING_SNAKE_CASE : Tuple="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : int="[MASK]" , __SCREAMING_SNAKE_CASE : List[str] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase_ ) ) __SCREAMING_SNAKE_CASE = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10 ): __SCREAMING_SNAKE_CASE = f"""[unused{i}]""" __SCREAMING_SNAKE_CASE = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(lowercase_ ) def __getstate__( self : str ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : str = False ) -> List[str]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) if token_ids_a is None: return ([0] * len(lowercase_ )) + [1] return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1] def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int = None ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _a ( self : Union[str, Any] ) -> str: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def _a ( self : List[str] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(lowercase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = "".join(lowercase_ ).replace(lowercase_ , ''' ''' ).strip() return out_string def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] = None ) -> List[str]: """simple 
docstring""" if not os.path.isdir(lowercase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple = None ) -> List[str]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
704
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) if "model" in sd.keys(): __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model'''] # pop unnecessary weights __SCREAMING_SNAKE_CASE = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __SCREAMING_SNAKE_CASE = sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __SCREAMING_SNAKE_CASE = sd[key] # We split QKV in separate Q,K,V __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) __SCREAMING_SNAKE_CASE = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(UpperCAmelCase__ , depth // 3 , dim=0 ) __SCREAMING_SNAKE_CASE = q __SCREAMING_SNAKE_CASE = k __SCREAMING_SNAKE_CASE = v del sd[key] return sd @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_checkpoint(UpperCAmelCase__ ) if config is not None: __SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = OPTConfig() __SCREAMING_SNAKE_CASE = OPTModel(UpperCAmelCase__ ).half().eval() model.load_state_dict(UpperCAmelCase__ ) # Check results Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") lowerCAmelCase__ =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
690
0
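# Editor's illustrative sketch (not a dataset row): the OPT conversion script above
# splits a fused QKV projection with `torch.split`. This shows the bare mechanics on
# a dummy tensor; the chunk order (q, k, v here) depends on the checkpoint layout, as
# the script's own comment about metaseq's K,V,Q ordering warns.
import torch

d = 64
qkv_weight = torch.randn(3 * d, d)  # stand-in for a fused `.qkv_proj.` weight
q, k, v = torch.split(qkv_weight, qkv_weight.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (d, d)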