Dataset schema (each record below lists these five fields in order):

    code                     string   (length 82 to 53.2k)
    code_codestyle           int64    (0 to 721)
    style_context            string   (length 91 to 41.9k)
    style_context_codestyle  int64    (0 to 699)
    label                    int64    (0 or 1)
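Each record pairs a code cell with a style_context cell, each tagged with an integer code-style id; in every complete record shown here, label is 1 exactly when the two style ids match, which suggests the label marks same-style pairs. As a minimal sketch of iterating such records (assuming the rows ship as a Hugging Face dataset; the dataset id below is a hypothetical placeholder):

import datasets

# "user/codestyle-pairs" is a placeholder id, not a real dataset name.
ds = datasets.load_dataset("user/codestyle-pairs", split="train")
for record in ds.select(range(3)):
    same_style = record["code_codestyle"] == record["style_context_codestyle"]
    # holds for the rows shown in this dump
    assert record["label"] == int(same_style)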
code:

from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))

    # Doolittle's method: fill one row of `lower` and one row of `upper` per pass.
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 141
style_context:

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
style_context_codestyle: 141
label: 1
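A quick numeric check of the LU routine restored above (not part of the dataset record; it uses the lower_upper_decomposition name from the reformatted cell):

import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
# L is unit lower-triangular, U is upper-triangular, and L @ U reproduces the input.
assert np.allclose(lower @ upper, matrix)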
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : List[Any] = controlnet_params lowercase__ : int = 'bird' lowercase__ : List[Any] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples ) lowercase__ : List[Any] = jax.random.PRNGKey(0 ) lowercase__ : Tuple = jax.random.split(a , jax.device_count() ) lowercase__ : str = replicate(a ) lowercase__ : List[str] = shard(a ) lowercase__ : Dict = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : Optional[Any] = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : Optional[Any] = controlnet_params lowercase__ : List[Any] = 'Chef in the kitchen' lowercase__ : List[str] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) lowercase__ : List[str] = jax.random.PRNGKey(0 ) lowercase__ : str = jax.random.split(a , jax.device_count() ) lowercase__ : Optional[Any] = replicate(a ) lowercase__ : Optional[Any] = shard(a ) lowercase__ : List[Any] = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * 
images.shape[1],) + images.shape[-3:] ) lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : str = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
code_codestyle: 645
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]: lowercase__ : str = parent lowercase__ : int = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Dict = patch_size lowercase__ : Tuple = tubelet_size lowercase__ : Optional[int] = num_frames lowercase__ : Optional[int] = is_training lowercase__ : int = use_labels lowercase__ : Optional[int] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = mask_ratio lowercase__ : Optional[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase__ : Optional[Any] = (image_size // patch_size) ** 2 lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase__ : str = int(mask_ratio * self.seq_length ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : Dict = VideoMAEModel(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( 
self , a , a , a ) -> Union[str, Any]: lowercase__ : str = VideoMAEForPreTraining(a ) model.to(a ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Any = torch.ones((self.num_masks,) ) lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase__ : str = model(a , a ) # model only returns predictions for masked patches lowercase__ : str = mask.sum().item() lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowerCamelCase__ : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = False def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = VideoMAEModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]: lowercase__ : Union[str, Any] = copy.deepcopy(a ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase__ : Union[str, Any] = bool_masked_pos.to(a ) if return_labels: if model_class in [ *get_values(a ), ]: lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : int = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.has_attentions: pass else: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase__ : Optional[Any] = True lowercase__ : int = False lowercase__ : Any = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Dict = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : List[str] = len(a ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : List[str] = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) ) self.assertEqual(out_len + 1 , len(a ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a ) , a ) lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True 
check_hidden_states_output(a , a , a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> List[Any]: pass def a_ ( ): '''simple docstring''' lowercase__ : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase__ : str = np.load(_lowerCAmelCase ) return list(_lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( a ) lowercase__ : str = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : Union[str, Any] = model(**a ) # verify the logits lowercase__ : str = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a ) # add boolean mask, indicating which patches to mask lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) lowercase__ : str = torch.load(a ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**a ) # verify the logits lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase__ : List[str] = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a ) self.assertEqual(outputs.logits.shape , a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to( a ) with torch.no_grad(): lowercase__ : Any = model(**a ) lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
style_context_codestyle: 645
label: 1
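For the tester defaults in the style_context cell above, the token bookkeeping works out as follows (plain arithmetic, just a worked check of the formulas in its __init__):

image_size, patch_size = 10, 2
num_frames, tubelet_size = 2, 2
mask_ratio = 0.9

num_patches_per_frame = (image_size // patch_size) ** 2            # 25
seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 25 tokens total
num_masks = int(mask_ratio * seq_length)                           # 22 masked tokens
print(num_patches_per_frame, seq_length, num_masks)                # 25 25 22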
code:

import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
code_codestyle: 344
style_context:

from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 629
label: 0
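Several style_context cells in this dump (the ByT5 one above, and the MVP, GPTBigCode, and Bloom __init__ files further down) follow the same lazy-import pattern: heavy submodules are imported only when an attribute is first accessed. A stripped-down sketch of the idea (illustrative names, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing heavy submodules until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map: attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)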
code:

import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
code_codestyle: 171
style_context:

from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 171
label: 1
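The Möbius cell above depends on two repo-local helpers (maths.is_square_free, maths.prime_factors). For quick experimentation without that repo, a self-contained equivalent might look like this (a sketch, not the repo's code):

def mobius_standalone(n: int) -> int:
    """Möbius function: 0 if n has a squared prime factor, else (-1)^(number of prime factors)."""
    count = 0
    d = 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:  # squared factor means n is not square-free
                return 0
            count += 1
        else:
            d += 1
    if n > 1:
        count += 1
    return -1 if count % 2 else 1

assert [mobius_standalone(n) for n in (1, 2, 4, 6, 30)] == [1, -1, 0, 1, -1]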
code:

'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 432
style_context:

from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
style_context_codestyle: 352
label: 0
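A quick illustration of the num_hidden_layers property in the config above (assuming the class is importable as XLMProphetNetConfig, its name in transformers):

cfg = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
assert cfg.num_hidden_layers == 24  # the property sums encoder and decoder layers

try:
    cfg.num_hidden_layers = 6
except NotImplementedError:
    pass  # the setter refuses; set num_encoder_layers / num_decoder_layers instead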
code:

'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 709
style_context:

from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that overshoot max_sum or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
style_context_codestyle: 260
label: 0
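A brute-force cross-check of the backtracking search in the style_context cell above (stdlib only; output order may differ from the recursive version):

from itertools import combinations

nums, max_sum = [3, 34, 4, 12, 5, 2], 9
brute = [
    list(c)
    for r in range(1, len(nums) + 1)
    for c in combinations(nums, r)
    if sum(c) == max_sum
]
print(brute)  # [[4, 5], [3, 4, 2]]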
"""simple docstring""" def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int: return 1 if input_a == input_a else 0 def snake_case ( ) -> None: assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
code_codestyle: 103
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
style_context_codestyle: 633
label: 0
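Since XNOR is the complement of XOR, the gate in this record's code cell admits a one-loop property check:

for a in (0, 1):
    for b in (0, 1):
        assert xnor_gate(a, b) == 1 - (a ^ b)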
code:

values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 308
style_context:

import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
style_context_codestyle: 308
label: 1
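The decimal-to-hex routine in this record's code cell can be sanity-checked against Python's built-in hex() (using the function name restored above; note it returns "0x" with no digits for an input of 0, unlike hex()):

assert decimal_to_hexadecimal(5) == hex(5) == "0x5"
assert decimal_to_hexadecimal(17) == hex(17) == "0x11"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"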
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 387
style_context:

import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
387
1
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __a = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , snake_case__ : List[str] ): """simple docstring""" super().__init__() A =torchvision.models.resnetaaa(pretrained=snake_case__ ) A =list(model.children() )[:-2] A =nn.Sequential(*snake_case__ ) A =nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self : Union[str, Any] , snake_case__ : Optional[Any] ): """simple docstring""" A =self.pool(self.model(snake_case__ ) ) A =torch.flatten(snake_case__ , start_dim=2 ) A =out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Tuple ): """simple docstring""" A =[json.loads(snake_case__ ) for l in open(snake_case__ )] A =os.path.dirname(snake_case__ ) A =tokenizer A =labels A =len(snake_case__ ) A =max_seq_length A =transforms def __len__( self : Any ): """simple docstring""" return len(self.data ) def __getitem__( self : Union[str, Any] , snake_case__ : str ): """simple docstring""" A =torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=snake_case__ ) ) A , A , A =sentence[0], sentence[1:-1], sentence[-1] A =sentence[: self.max_seq_length] A =torch.zeros(self.n_classes ) A =1 A =Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" ) A =self.transforms(snake_case__ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self : Dict ): """simple docstring""" A =Counter() for row in self.data: label_freqs.update(row["label"] ) return label_freqs def UpperCamelCase_ ( a_ ) ->List[str]: A =[len(row["sentence"] ) for row in batch] A , A =len(a_ ), max(a_ ) A =torch.zeros(a_ , a_ , dtype=torch.long ) A =torch.zeros(a_ , a_ , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(a_ , a_ ) ): A =input_row["sentence"] A =1 A =torch.stack([row["image"] for row in batch] ) A =torch.stack([row["label"] for row in batch] ) A =torch.stack([row["image_start_token"] for row in batch] ) A =torch.stack([row["image_end_token"] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def UpperCamelCase_ ( ) ->Optional[int]: return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def UpperCamelCase_ ( ) ->Optional[Any]: return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ), ] )
689
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"""vocab_file""": """vocab.txt"""} __a = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } __a = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def UpperCamelCase_ ( a_ ) ->List[Any]: A =collections.OrderedDict() with open(a_ , "r" , encoding="utf-8" ) as reader: A =reader.readlines() for index, token in enumerate(a_ ): A =token.rstrip("\n" ) A =index return vocab class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ): """simple docstring""" A =vocab A =unk_token A =max_input_chars_per_word def _a ( self : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =list(snake_case__ ) if len(snake_case__ ) > self.max_input_chars_per_word: return [self.unk_token] A =0 A =[] while start < len(snake_case__ ): A =len(snake_case__ ) A =None while start < end: A ="".join(chars[start:end] ) if substr in self.vocab: A =substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(snake_case__ ) A =end return sub_tokens class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] _A = False def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ): """simple docstring""" requires_backends(self , ["jieba"] ) super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , ) A =bod_token A =eod_token A =load_vocab(snake_case__ ) A =self.encoder[space_token] A =self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) A ={v: k for k, v in self.encoder.items()} A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _a ( self : Dict ): """simple docstring""" return self.encoder[self.bod_token] @property def _a ( self : List[str] ): """simple docstring""" return self.encoder[self.eod_token] @property def _a ( self : Any ): """simple docstring""" return self.encoder["\n"] @property def _a ( self : List[str] ): """simple docstring""" return len(self.encoder ) def _a ( self : Tuple ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : Tuple , snake_case__ : int ): """simple docstring""" A =[] for x in jieba.cut(snake_case__ , cut_all=snake_case__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) ) return output_tokens def _a ( self : List[Any] , snake_case__ : List[Any] , 
**snake_case__ : str ): """simple docstring""" A =[i for i in token_ids if i >= 0] A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(snake_case__ , **snake_case__ ) def _a ( self : List[Any] , snake_case__ : int ): """simple docstring""" return token in self.encoder def _a ( self : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" return "".join(snake_case__ ) def _a ( self : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Dict , snake_case__ : Optional[int] ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if os.path.isdir(snake_case__ ): A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: A =(filename_prefix + "-" if filename_prefix else "") + save_directory A =0 if " " in self.encoder: A =self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: A =self.encoder["\n"] del self.encoder["\n"] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(snake_case__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) A =token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is not None: return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) return [1] + ([0] * len(snake_case__ ))
689
1
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


UpperCamelCase = logging.get_logger(__name__)


class lowerCAmelCase_(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
66
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A_ = f'''{src_lang}-{tgt_lang}''' A_ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = os.path.join(__UpperCamelCase ,"README.md" ) print(f'''Generating {path}''' ) with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f: f.write(__UpperCamelCase ) # make sure we are under the root of the project __a :Optional[Any] = Path(__file__).resolve().parent.parent.parent __a :Optional[Any] = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __a , __a , __a :int = model_name.split('-') __a :str = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
86
0
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig a_ : Tuple = logging.get_logger(__name__) # General docstring a_ : List[str] = '''PoolFormerConfig''' # Base docstring a_ : Optional[Any] = '''sail/poolformer_s12''' a_ : List[Any] = [1, 5_12, 7, 7] # Image classification docstring a_ : Any = '''sail/poolformer_s12''' a_ : Optional[int] = '''tabby, tabby cat''' a_ : Optional[Any] = [ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCAmelCase ( A__: Optional[Any] , A__: float = 0.0 , A__: bool = False ) -> Tuple: if drop_prob == 0.0 or not training: return input __lowerCamelCase : Dict = 1 - drop_prob __lowerCamelCase : List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets __lowerCamelCase : List[Any] = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize __lowerCamelCase : Any = input.div(A__ ) * random_tensor return output class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a = None ): super().__init__() __lowerCamelCase : int = drop_prob def snake_case_ ( self , __a ): return drop_path(__a , self.drop_prob , self.training ) def snake_case_ ( self ): return "p={}".format(self.drop_prob ) class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a , __a=None ): super().__init__() __lowerCamelCase : int = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size) __lowerCamelCase : int = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride) __lowerCamelCase : Optional[int] = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding) __lowerCamelCase : Optional[Any] = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a ) __lowerCamelCase : List[str] = norm_layer(__a ) if norm_layer else nn.Identity() def snake_case_ ( self , __a ): __lowerCamelCase : List[Any] = self.projection(__a ) __lowerCamelCase : Dict = self.norm(__a ) return embeddings class __lowercase( nn.GroupNorm ): '''simple docstring''' def __init__( self , __a , **__a ): super().__init__(1 , __a , **__a ) class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCamelCase : str = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a ) def snake_case_ ( self , __a ): return self.pool(__a ) - hidden_states class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a , __a , __a , __a ): super().__init__() __lowerCamelCase : Any = nn.Convad(__a , __a , 1 ) __lowerCamelCase : Dict = nn.Convad(__a , __a , 1 ) __lowerCamelCase : List[Any] = PoolFormerDropPath(__a ) if isinstance(config.hidden_act , __a ): __lowerCamelCase : List[str] = ACTaFN[config.hidden_act] else: __lowerCamelCase : str = config.hidden_act def snake_case_ ( self , __a ): __lowerCamelCase : int = self.conva(__a ) __lowerCamelCase : 
Dict = self.act_fn(__a ) __lowerCamelCase : List[str] = self.drop(__a ) __lowerCamelCase : int = self.conva(__a ) __lowerCamelCase : str = self.drop(__a ) return hidden_states class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a , __a ): super().__init__() __lowerCamelCase : Tuple = PoolFormerPooling(__a ) __lowerCamelCase : Union[str, Any] = PoolFormerOutput(__a , __a , __a , __a ) __lowerCamelCase : List[Any] = PoolFormerGroupNorm(__a ) __lowerCamelCase : List[Any] = PoolFormerGroupNorm(__a ) # Useful for training neural nets __lowerCamelCase : Any = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity() __lowerCamelCase : Tuple = config.use_layer_scale if config.use_layer_scale: __lowerCamelCase : List[str] = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) __lowerCamelCase : Optional[int] = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) def snake_case_ ( self , __a ): if self.use_layer_scale: __lowerCamelCase : Union[str, Any] = self.pooling(self.before_norm(__a ) ) __lowerCamelCase : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection __lowerCamelCase : Optional[Any] = hidden_states + self.drop_path(__a ) __lowerCamelCase : Tuple = () __lowerCamelCase : Optional[Any] = self.output(self.after_norm(__a ) ) __lowerCamelCase : List[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection __lowerCamelCase : List[Any] = hidden_states + self.drop_path(__a ) __lowerCamelCase : Optional[Any] = (output,) + outputs return outputs else: __lowerCamelCase : Tuple = self.drop_path(self.pooling(self.before_norm(__a ) ) ) # First residual connection __lowerCamelCase : List[str] = pooling_output + hidden_states __lowerCamelCase : int = () # Second residual connection inside the PoolFormerOutput block __lowerCamelCase : List[str] = self.drop_path(self.output(self.after_norm(__a ) ) ) __lowerCamelCase : str = hidden_states + layer_output __lowerCamelCase : int = (output,) + outputs return outputs class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCamelCase : int = config # stochastic depth decay rule __lowerCamelCase : int = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings __lowerCamelCase : List[str] = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) __lowerCamelCase : Optional[int] = nn.ModuleList(__a ) # Transformer blocks __lowerCamelCase : Any = [] __lowerCamelCase : int = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers __lowerCamelCase : Optional[int] = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__a ) ) __lowerCamelCase : str = nn.ModuleList(__a ) def snake_case_ ( self , __a , __a=False , __a=True ): __lowerCamelCase : Union[str, Any] = () if output_hidden_states else None __lowerCamelCase : int 
= pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): __lowerCamelCase , __lowerCamelCase : Any = layers # Get patch embeddings from hidden_states __lowerCamelCase : Any = embedding_layer(__a ) # Send the embeddings through the blocks for _, blk in enumerate(__a ): __lowerCamelCase : Optional[int] = blk(__a ) __lowerCamelCase : Tuple = layer_outputs[0] if output_hidden_states: __lowerCamelCase : Union[str, Any] = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a ) class __lowercase( lowercase__ ): '''simple docstring''' __a : Tuple = PoolFormerConfig __a : Tuple = 'poolformer' __a : Optional[int] = 'pixel_values' __a : Optional[Any] = True def snake_case_ ( self , __a ): if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def snake_case_ ( self , __a , __a=False ): if isinstance(__a , __a ): __lowerCamelCase : Union[str, Any] = value a_ : Union[str, Any] = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' a_ : List[str] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. ''' @add_start_docstrings( 'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' 
, lowercase__ , ) class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a ): super().__init__(__a ) __lowerCamelCase : Optional[Any] = config __lowerCamelCase : Any = PoolFormerEncoder(__a ) # Initialize weights and apply final processing self.post_init() def snake_case_ ( self ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case_ ( self , __a = None , __a = None , __a = None , ): __lowerCamelCase : Union[str, Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __lowerCamelCase : Any = self.encoder( __a , output_hidden_states=__a , return_dict=__a , ) __lowerCamelCase : int = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , ) class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCamelCase : Optional[Any] = nn.Linear(config.hidden_size , config.hidden_size ) def snake_case_ ( self , __a ): __lowerCamelCase : List[Any] = self.dense(__a ) return output @add_start_docstrings( '\n PoolFormer Model transformer with an image classification head on top\n ' , lowercase__ , ) class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a ): super().__init__(__a ) __lowerCamelCase : str = config.num_labels __lowerCamelCase : Optional[Any] = PoolFormerModel(__a ) # Final norm __lowerCamelCase : str = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head __lowerCamelCase : Optional[Any] = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case_ ( self , __a = None , __a = None , __a = None , __a = None , ): __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Tuple = self.poolformer( __a , output_hidden_states=__a , return_dict=__a , ) __lowerCamelCase : int = outputs[0] __lowerCamelCase : Optional[int] = self.classifier(self.norm(__a ).mean([-2, -1] ) ) __lowerCamelCase : Union[str, Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCamelCase : Any = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowerCamelCase : Any = 'single_label_classification' else: __lowerCamelCase : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __lowerCamelCase : int = MSELoss() if self.num_labels == 1: __lowerCamelCase : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowerCamelCase : Optional[Any] = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __lowerCamelCase : Tuple = CrossEntropyLoss() __lowerCamelCase : int = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase : List[Any] = BCEWithLogitsLoss() __lowerCamelCase : Optional[Any] = loss_fct(__a , __a ) if not return_dict: __lowerCamelCase : Optional[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
263
"""simple docstring""" import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): __lowerCamelCase : List[str] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : str = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : Dict = use_input_mask __lowerCamelCase : Dict = use_token_type_ids __lowerCamelCase : Dict = use_labels __lowerCamelCase : Optional[Any] = vocab_size __lowerCamelCase : Any = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Tuple = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_act __lowerCamelCase : Any = hidden_dropout_prob __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob __lowerCamelCase : List[Any] = max_position_embeddings __lowerCamelCase : Optional[Any] = type_vocab_size __lowerCamelCase : Dict = type_sequence_label_size __lowerCamelCase : Any = initializer_range __lowerCamelCase : Union[str, Any] = num_labels __lowerCamelCase : Tuple = num_choices __lowerCamelCase : str = relative_attention __lowerCamelCase : Optional[int] = position_biased_input __lowerCamelCase : int = pos_att_type __lowerCamelCase : str = scope def snake_case_ ( self ): __lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase : int = None if self.use_input_mask: __lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowerCamelCase : Tuple = None if self.use_token_type_ids: __lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase : Optional[Any] = None __lowerCamelCase : Optional[Any] = None __lowerCamelCase : int = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ ( self ): return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def snake_case_ ( self , __a ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : int = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : str = model(__a , attention_mask=__a , token_type_ids=__a )[0] __lowerCamelCase : str = model(__a , token_type_ids=__a )[0] __lowerCamelCase : Optional[Any] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : List[str] = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[int] = self.num_labels __lowerCamelCase : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : int = self.num_labels __lowerCamelCase : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : int = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[Any] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : Any = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Any = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : List[Any] = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case_ ( self ): __lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : List[str] = config_and_inputs __lowerCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' __a : Dict = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) __a : Tuple = ( { 'feature-extraction': DebertaVaModel, 'fill-mask': DebertaVaForMaskedLM, 'question-answering': DebertaVaForQuestionAnswering, 'text-classification': DebertaVaForSequenceClassification, 'token-classification': DebertaVaForTokenClassification, 'zero-shot': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) __a : str = True __a : Dict = False __a : Tuple = False __a : Optional[Any] = False __a : List[Any] = False def snake_case_ ( self ): __lowerCamelCase : List[str] = DebertaVaModelTester(self ) __lowerCamelCase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def snake_case_ ( self ): self.config_tester.run_common_tests() def snake_case_ ( self ): __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def snake_case_ ( self ): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def snake_case_ ( self ): __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def snake_case_ ( self ): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def snake_case_ ( self ): __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def snake_case_ ( self ): __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def snake_case_ ( self ): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __lowercase( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def snake_case_ ( self ): pass @slow def snake_case_ ( self ): __lowerCamelCase : Any = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __lowerCamelCase : Any = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __lowerCamelCase : str = torch.tensor( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
263
1
'''simple docstring'''
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class UpperCAmelCase_(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        mmeta, metadata = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
508
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowercase__ = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class UpperCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
508
1
'''simple docstring'''
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n    only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'

_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCAmelCase(datasets.Metric):
    """simple docstring"""

    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """simple docstring"""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
159
'''simple docstring''' import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str]=1_3 , SCREAMING_SNAKE_CASE : Optional[Any]=7 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Optional[int]=9_9 , SCREAMING_SNAKE_CASE : Dict=3_2 , SCREAMING_SNAKE_CASE : Dict=5 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : Optional[Any]=3_7 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Dict=5_1_2 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[int]=4 , ) -> Optional[int]: """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_attention_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_choices def __A ( self : List[str] ) -> List[str]: """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_attention_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __A ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def __A ( self : List[Any] ) -> Any: """simple 
docstring""" lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs lowerCAmelCase = True lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __A ( self : Union[str, Any] ) -> Any: """simple docstring""" lowerCAmelCase = FlaxRobertaModelTester(self ) @slow def __A ( self : str ) -> Optional[Any]: """simple docstring""" for model_class_name in self.all_model_classes: lowerCAmelCase = model_class_name.from_pretrained("roberta-base" , from_pt=SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE )
159
1
'''simple docstring'''
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
358
'''simple docstring'''


def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
358
1
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
708
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")

        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
368
0
def method_1(boundary, steps):
    # "extended trapezoidal rule": int(f) ~ h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_n-1) + f(b))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
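A quick usage sketch (my addition, not part of the original sample): with f(x) = x**2 the exact integral over [0, 1] is 1/3, and the estimate from method_1 above should tighten as steps grows, modulo the float-sensitive endpoint handling inside make_points.

# Usage sketch only; reuses method_1 and f defined above.
for steps in (10.0, 100.0, 1000.0):
    print(f"steps={steps}: y = {method_1([0.0, 1.0], steps)}")
# Expected to approach 1/3 ~= 0.3333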
267
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
267
1
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
206
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
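A hedged usage sketch (my addition, not part of the original file): the same config class ships in transformers, so the defaults above can be exercised directly.

# Sketch only; assumes `transformers` is installed.
from transformers import CanineConfig

config = CanineConfig(num_hash_functions=8, local_transformer_stride=128)
print(config.model_type)               # "canine"
print(config.max_position_embeddings)  # 16384 by default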
206
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
660
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
660
1
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))


vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
41
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
41
1
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
371
from torch import nn


def get_activation(act_fn):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
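A minimal usage sketch (my addition, not part of the original file): mapping a config string to an activation module and applying it.

# Sketch only; mirrors the helper above.
import torch

act = get_activation("silu")
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # tensor([-0.2689, 0.0000, 0.7311])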
150
0
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
409
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1],
                [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1] + [0] * 54,
                [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1] + [0] * 72,
            ],
            "attention_mask": [
                [1] * 83,
                [1] * 29 + [0] * 54,
                [1] * 11 + [0] * 72,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        original_input = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(original_input).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
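As a quick sanity check on the config above, the value exposed by `inputs_to_logits_ratio` can be reproduced with the same `functools.reduce` arithmetic; this sketch only assumes the default `conv_stride` shown in the signature and does not import the class:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from UniSpeechSatConfig above

# Total downsampling of the convolutional feature extractor: one output frame
# per 320 waveform samples, i.e. 20 ms of audio at a 16 kHz sampling rate.
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320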
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
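A short round-trip check for the two helpers above; `encrypt` upper-cases its input, so the decoded text matches only for upper-case messages:

message = "SOS AT NOON"
encoded = encrypt(message)
assert encoded == "... --- ... / .- - / -. --- --- -."
assert decrypt(encoded) == message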
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1_318) < 1e-2
        assert abs(result_mean.item() - 0.2_580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3_986) < 1e-2
        assert abs(result_mean.item() - 0.0_878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0_399) < 1e-2
        assert abs(result_mean.item() - 0.2_995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9_482) < 1e-2
        assert abs(result_mean.item() - 0.2_434) < 1e-3
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
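`instantiate_from_config` only needs a dotted import path plus keyword `params`, so it can be exercised without taming/torch checkpoints; the stdlib target below is an arbitrary callable chosen purely for illustration:

from fractions import Fraction

config = {"target": "fractions.Fraction", "params": {"numerator": 3, "denominator": 4}}
obj = instantiate_from_config(config)  # resolves the path, then calls it with params
assert obj == Fraction(3, 4)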
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix; suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix; suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers UpperCAmelCase__ =[int(0.5 * n * (n + 1)) for n in range(1, 101)] def lowerCAmelCase_ ( ): """simple docstring""" __lowercase = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE_ ) ) __lowercase = os.path.join(SCREAMING_SNAKE_CASE_ , """words.txt""" ) __lowercase = """""" with open(SCREAMING_SNAKE_CASE_ ) as f: __lowercase = f.readline() __lowercase = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] __lowercase = [ word for word in [sum(ord(SCREAMING_SNAKE_CASE_ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": print(solution())
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) UpperCAmelCase__ =logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase__ =argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=3_0522, type=int) UpperCAmelCase__ =parser.parse_args() logger.info(f"""Loading data from {args.data_file}""") with open(args.data_file, "rb") as fp: UpperCAmelCase__ =pickle.load(fp) logger.info("Counting occurrences for MLM.") UpperCAmelCase__ =Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase__ =[0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase__ =v logger.info(f"""Dump to {args.token_counts_dump}""") with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""Feature extractor class for GLPN."""

import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
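A migration sketch (assuming a `transformers` install recent enough to expose both classes): the two constructors return equivalent processors, but only the second avoids the `FutureWarning` raised above:

from transformers import GLPNFeatureExtractor, GLPNImageProcessor

feature_extractor = GLPNFeatureExtractor()  # deprecated alias, emits FutureWarning
image_processor = GLPNImageProcessor()      # drop-in replacement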
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
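The recursion above makes exactly 2**height - 1 moves; capturing stdout verifies this for a small tower:

import io
from contextlib import redirect_stdout

buffer = io.StringIO()
with redirect_stdout(buffer):
    move_tower(3, "A", "B", "C")
assert len(buffer.getvalue().splitlines()) == 2**3 - 1  # 7 moves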
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` function (that maps an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
582
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowercase__(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
582
1
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) _lowercase = logging.getLogger() def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->Any: __lowercase = "\n".join(__magic_name__ ) Path(__magic_name__ ).open("w" ).writelines(__magic_name__ ) _lowercase = '''patrickvonplaten/t5-tiny-random''' _lowercase = '''sshleifer/bart-tiny-random''' _lowercase = '''sshleifer/tiny-mbart''' _lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class __a ( __a ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[str]: '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(_lowerCamelCase , _lowerCamelCase ) __lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(_lowerCamelCase , "argv" , _lowerCamelCase ): run_generate() assert Path(_lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' self.run_eval_tester(_lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> int: '''simple docstring''' self.run_eval_tester(_lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Dict: '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } __lowercase = Path(self.get_auto_remove_tmp_dir() ) __lowercase = str(tmp_dir / "scores.json" ) __lowercase = str(tmp_dir / "val.target" ) _dump_articles(_lowerCamelCase , text["en"] ) _dump_articles(_lowerCamelCase , text["de"] ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f''' run_eval_search.py {model} {str(_lowerCamelCase )} {str(_lowerCamelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] ) with patch.object(_lowerCamelCase , "argv" , _lowerCamelCase ): with CaptureStdout() as cs: run_search() __lowercase = [" num_beams | length_penalty", model, "Best score args"] __lowercase = ["Info"] if "translation" in task: expected_strings.append("bleu" ) else: expected_strings.extend(_lowerCamelCase ) for w in expected_strings: assert w in cs.out 
for w in un_expected_strings: assert w not in cs.out assert Path(_lowerCamelCase ).exists() os.remove(Path(_lowerCamelCase ) )
118
"""simple docstring""" def lowerCAmelCase__ ( __magic_name__ = 1_0 ) ->str: if not isinstance(__magic_name__ , __magic_name__ ) or n < 0: raise ValueError("Invalid input" ) __lowercase = 1_0**n __lowercase = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , __magic_name__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
118
1
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=True ): '''simple docstring''' model.train() SCREAMING_SNAKE_CASE__ = model(snake_case__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(snake_case__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case__ ) def A ( snake_case__ , snake_case__=False ): '''simple docstring''' set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(snake_case__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(snake_case__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 ) SCREAMING_SNAKE_CASE__ = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(snake_case__ , snake_case__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(snake_case__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(snake_case__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case__ ): step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) else: # Sync grads step_model(snake_case__ , 
snake_case__ , snake_case__ , snake_case__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(snake_case__ ) )] def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(snake_case__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(snake_case__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case__ ): step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) else: # Sync grads step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(snake_case__ ) )] def A ( snake_case__=False , snake_case__=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(snake_case__ ) for iteration, batch in enumerate(snake_case__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case__ ): step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): 
if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(snake_case__ ) )] GradientState._reset_state() def A ( snake_case__=False , snake_case__=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(snake_case__ , snake_case__ ) for iteration, batch in enumerate(snake_case__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case__ ): step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ )) if accelerator.num_processes > 1: check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(snake_case__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(snake_case__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(snake_case__ , snake_case__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ ) if iteration < len(snake_case__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case__ ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ ) if batch_num < len(snake_case__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(snake_case__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(snake_case__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case__ , snake_case__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ ) def A ( snake_case__ ): '''simple docstring''' main() if __name__ == "__main__": main()
616
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def A ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def A ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def A ( ): '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head("""https://huggingface.co""" )
616
1
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class UpperCamelCase__:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
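# A minimal usage sketch (the tensor shapes below are invented for illustration,
# not part of the original file): the encoder expects a batch in [-1, 1] with
# layout (N, C, H, W) and at least 256 pixels on the last axis, and returns a
# watermarked tensor with the same layout and range.
#
#     watermarker = UpperCamelCase__()
#     fake_batch = torch.rand(1, 3, 256, 256) * 2 - 1  # stand-in for decoder output
#     marked = watermarker.apply_watermark(fake_batch)
#     assert marked.shape == fake_batch.shape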
412
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
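# A few illustrative inputs (chosen for this sketch, not from the original
# file): the pattern accepts an optional "+91" / "0" / "91" prefix followed by
# a 10-digit number whose first digit is 7, 8 or 9.
#
#     indian_phone_validator("9876543210")      # True  (bare 10-digit number)
#     indian_phone_validator("+91-9876543210")  # True  (+91 with dash separator)
#     indian_phone_validator("1234567890")      # False (must start with 7/8/9)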
412
1
"""simple docstring""" import string def __UpperCAmelCase ( _snake_case : str ): _lowercase = "" for i in sequence: _lowercase = ord(_snake_case ) if 6_5 <= extract <= 9_0: output += chr(1_5_5 - extract ) elif 9_7 <= extract <= 1_2_2: output += chr(2_1_9 - extract ) else: output += i return output def __UpperCAmelCase ( _snake_case : str ): _lowercase = string.ascii_letters _lowercase = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(_snake_case )] if c in letters else c for c in sequence ) def __UpperCAmelCase ( ): from timeit import timeit print("Running performance benchmarks..." ) _lowercase = "from string import printable ; from __main__ import atbash, atbash_slow" print(f"""> atbash_slow(): {timeit("atbash_slow(printable)", setup=_snake_case )} seconds""" ) print(f"""> atbash(): {timeit("atbash(printable)", setup=_snake_case )} seconds""" ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f'''{example} encrypted in atbash: {atbash(example)}''') benchmark()
227
"""simple docstring""" import argparse import datetime def __UpperCAmelCase ( _snake_case : str ): _lowercase = { "0": "Sunday", "1": "Monday", "2": "Tuesday", "3": "Wednesday", "4": "Thursday", "5": "Friday", "6": "Saturday", } _lowercase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(_snake_case ) < 1_1: raise ValueError("Must be 10 characters long" ) # Get month _lowercase = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 1_3: raise ValueError("Month must be between 1 - 12" ) _lowercase = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError("Date separator must be '-' or '/'" ) # Get day _lowercase = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 3_2: raise ValueError("Date must be between 1 - 31" ) # Get second separator _lowercase = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError("Date separator must be '-' or '/'" ) # Get year _lowercase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 4_5 < y < 8_5_0_0: raise ValueError( "Year out of range. There has to be some sort of limit...right?" ) # Get datetime obj for validation _lowercase = datetime.date(int(_snake_case ), int(_snake_case ), int(_snake_case ) ) # Start math if m <= 2: _lowercase = y - 1 _lowercase = m + 1_2 # maths var _lowercase = int(str(_snake_case )[:2] ) _lowercase = int(str(_snake_case )[2:] ) _lowercase = int(2.6 * m - 5.3_9 ) _lowercase = int(c / 4 ) _lowercase = int(k / 4 ) _lowercase = int(d + k ) _lowercase = int(t + u + v + x ) _lowercase = int(z - (2 * c) ) _lowercase = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError("The date was evaluated incorrectly. Contact developer." ) # Response _lowercase = f"""Your date {date_input}, is a {days[str(_snake_case )]}!""" return response if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Any = argparse.ArgumentParser( description=( "Find out what day of the week nearly any date is or was. Enter " "date as a string in the mm-dd-yyyy or mm/dd/yyyy format" ) ) parser.add_argument( "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)" ) __UpperCamelCase : str = parser.parse_args() zeller(args.date_input)
227
1
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size=8, capacity_factor=0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key):
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind):
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self):
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self):
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val):
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val):
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self):
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
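# A short usage sketch (example keys/values invented for illustration): the map
# grows once the load factor passes ``_capacity_factor`` and tombstones deleted
# slots with ``_deleted`` so linear-probe chains stay intact.
#
#     hm = HashMap()
#     for i in range(20):
#         hm[f"key-{i}"] = i           # triggers at least one _size_up()
#     del hm["key-3"]                   # slot becomes _deleted, not None
#     assert len(hm) == 19 and hm["key-4"] == 4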
12
"""simple docstring""" import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename _A = 'http://www.mocksite.com/file1.txt' _A = '"text": ["foo", "foo"]' _A = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class lowerCamelCase : '''simple docstring''' a = 2_0_0 a = {"Content-Length": "100"} a = {} def lowerCAmelCase_ ( self : Union[str, Any] , **_snake_case : str ) -> Any: return [bytes(_snake_case , "utf-8" )] def SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: return MockResponse() @pytest.mark.parametrize("urls_type" , [str, list, dict] ) def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: import requests monkeypatch.setattr(__UpperCAmelCase , "request" , __UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = URL if issubclass(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = url elif issubclass(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = [url] elif issubclass(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = {"train": url} SCREAMING_SNAKE_CASE__ = "dummy" SCREAMING_SNAKE_CASE__ = "downloads" SCREAMING_SNAKE_CASE__ = tmp_path SCREAMING_SNAKE_CASE__ = DownloadConfig( cache_dir=os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , use_etag=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE__ = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = dl_manager.download(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = urls for downloaded_paths in [downloaded_paths]: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = [downloaded_paths] SCREAMING_SNAKE_CASE__ = [urls] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): assert "train" in downloaded_paths.keys() SCREAMING_SNAKE_CASE__ = downloaded_paths.values() SCREAMING_SNAKE_CASE__ = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__UpperCAmelCase , __UpperCAmelCase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() SCREAMING_SNAKE_CASE__ = downloaded_path.read_text() assert content == CONTENT SCREAMING_SNAKE_CASE__ = downloaded_path.with_suffix(".json" ) assert metadata_downloaded_path.exists() SCREAMING_SNAKE_CASE__ = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize("paths_type" , [str, list, dict] ) def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: SCREAMING_SNAKE_CASE__ = str(__UpperCAmelCase ) if issubclass(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = filename elif issubclass(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = [filename] elif issubclass(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = {"train": filename} SCREAMING_SNAKE_CASE__ = "dummy" SCREAMING_SNAKE_CASE__ = xz_file.parent SCREAMING_SNAKE_CASE__ = "extracted" SCREAMING_SNAKE_CASE__ = DownloadConfig( cache_dir=__UpperCAmelCase , use_etag=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE__ = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = 
dl_manager.extract(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = paths for extracted_paths in [extracted_paths]: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = [extracted_paths] SCREAMING_SNAKE_CASE__ = [paths] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): assert "train" in extracted_paths.keys() SCREAMING_SNAKE_CASE__ = extracted_paths.values() SCREAMING_SNAKE_CASE__ = paths.values() assert extracted_paths for extracted_path, input_path in zip(__UpperCAmelCase , __UpperCAmelCase ): assert extracted_path == dl_manager.extracted_paths[input_path] SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = extracted_path.parts assert parts[-1] == hash_url_to_filename(__UpperCAmelCase , etag=__UpperCAmelCase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() SCREAMING_SNAKE_CASE__ = extracted_path.read_text() SCREAMING_SNAKE_CASE__ = text_file.read_text() assert extracted_file_content == expected_file_content def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> int: assert path.endswith(".jsonl" ) for num_items, line in enumerate(__UpperCAmelCase , start=1 ): SCREAMING_SNAKE_CASE__ = json.loads(line.decode("utf-8" ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] ) def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = request.getfixturevalue(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ): _test_jsonl(__UpperCAmelCase , __UpperCAmelCase ) assert num_jsonl == 2 @pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] ) def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: SCREAMING_SNAKE_CASE__ = request.getfixturevalue(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ): _test_jsonl(__UpperCAmelCase , __UpperCAmelCase ) assert num_tar == 1 assert num_jsonl == 2 def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> Tuple: SCREAMING_SNAKE_CASE__ = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__UpperCAmelCase ) , start=1 ): assert os.path.basename(__UpperCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
159
0
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    # each octet must be a number in 0-255; anything else fails validation
    return len(octets) == 4 and all(0 <= int(octet) <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
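# A couple of quick checks (inputs invented for this sketch): the
# split/isdigit filter drops non-numeric octets, so anything that is not
# exactly four integers in [0, 255] comes back False.
#
#     is_ip_va_address_valid("192.168.0.23")    # True
#     is_ip_va_address_valid("192.168.256.1")   # False (octet out of range)
#     is_ip_va_address_valid("12.4.3")          # False (only three octets)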
707
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
143
0
from ....utils import logging


logger = logging.get_logger(__name__)


class __lowerCAmelCase:
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
9
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class __lowerCamelCase(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
1
0
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class _lowerCAmelCase(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
701
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _lowerCAmelCase ( ctypes.Structure ): """simple docstring""" __magic_name__ :Union[str, Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def __A () ->Dict: """simple docstring""" if os.name == "nt": lowerCAmelCase__ :Optional[Any] = CursorInfo() lowerCAmelCase__ :int = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) lowerCAmelCase__ :Any = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write('\033[?25l' ) sys.stdout.flush() def __A () ->Any: """simple docstring""" if os.name == "nt": lowerCAmelCase__ :List[Any] = CursorInfo() lowerCAmelCase__ :Optional[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) lowerCAmelCase__ :Dict = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write('\033[?25h' ) sys.stdout.flush() @contextmanager def __A () ->Any: """simple docstring""" try: hide_cursor() yield finally: show_cursor()
560
0
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seqaseq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider",
            "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0])
        self.assertFalse(0 in out_sa["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1])
        self.assertTrue(0 in out_sa["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0])
        self.assertFalse(0 in out_pa["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1])
        self.assertTrue(0 in out_pa["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))

    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        bos = "bos"
        __lowercase
: Optional[int] = tokenizer.get_vocab()["bos"] __lowercase : List[str] = "A photo of a cat" __lowercase : Dict = tokenizer.encode( _a , ) # We changed the bos token self.assertEqual(_a , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("""./tok""" ) __lowercase : Tuple = AutoTokenizer.from_pretrained("""./tok""" ) self.assertTrue(tokenizer.is_fast ) __lowercase : str = tokenizer.encode( _a , ) self.assertEqual(_a , [31957, 250, 1345, 9, 10, 4758] )
149
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A : Optional[int] = logging.get_logger(__name__) def __magic_name__ ( __snake_case : str ) -> YolosConfig: lowercase : int = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowercase : Union[str, Any] = 192 lowercase : Tuple = 768 lowercase : Optional[Any] = 12 lowercase : List[str] = 3 lowercase : int = [800, 1333] lowercase : Union[str, Any] = False elif yolos_name == "yolos_s_dWr": lowercase : int = 330 lowercase : List[str] = 14 lowercase : Dict = 6 lowercase : Any = 1320 elif "yolos_s" in yolos_name: lowercase : str = 384 lowercase : int = 1536 lowercase : Any = 12 lowercase : int = 6 elif "yolos_b" in yolos_name: lowercase : str = [800, 1344] lowercase : Tuple = 91 lowercase : Tuple = "huggingface/label-files" lowercase : Any = "coco-detection-id2label.json" lowercase : str = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) ) lowercase : List[str] = {int(__snake_case ): v for k, v in idalabel.items()} lowercase : Union[str, Any] = idalabel lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def __magic_name__ ( __snake_case : dict , __snake_case : YolosConfig , __snake_case : bool = False ) -> int: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) lowercase : int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : Union[str, Any] = in_proj_weight[: config.hidden_size, :] lowercase : int = in_proj_bias[: config.hidden_size] lowercase : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase : Dict = in_proj_weight[-config.hidden_size :, :] lowercase : List[Any] = in_proj_bias[-config.hidden_size :] def __magic_name__ ( __snake_case : str ) -> str: if "backbone" in name: lowercase : Union[str, Any] = name.replace("backbone" , "vit" ) if "cls_token" in name: lowercase : Union[str, Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: lowercase : Dict = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: lowercase : Optional[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: lowercase : Any = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowercase : Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: lowercase : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowercase : Optional[int] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowercase : Dict = name.replace("attn" , "attention.self" ) if "norm1" in name: lowercase : List[str] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowercase : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowercase : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" 
) if "mlp.fc2" in name: lowercase : List[str] = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: lowercase : List[Any] = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: lowercase : Optional[Any] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: lowercase : Optional[int] = name.replace("vit.norm" , "vit.layernorm" ) return name def __magic_name__ ( __snake_case : dict , __snake_case : YolosForObjectDetection ) -> dict: for key in orig_state_dict.copy().keys(): lowercase : List[Any] = orig_state_dict.pop(__snake_case ) if "qkv" in key: lowercase : Union[str, Any] = key.split("." ) lowercase : List[Any] = int(key_split[2] ) lowercase : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowercase : int = val[:dim, :] lowercase : List[Any] = val[ dim : dim * 2, : ] lowercase : Any = val[-dim:, :] else: lowercase : Dict = val[:dim] lowercase : Dict = val[dim : dim * 2] lowercase : str = val[-dim:] else: lowercase : List[str] = val return orig_state_dict def __magic_name__ ( ) -> torch.Tensor: lowercase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase : Tuple = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def __magic_name__ ( __snake_case : str , __snake_case : str , __snake_case : str , __snake_case : bool = False ) -> Dict: lowercase : Optional[int] = get_yolos_config(__snake_case ) # load original state_dict lowercase : str = torch.load(__snake_case , map_location="cpu" )["model"] # load 🤗 model lowercase : str = YolosForObjectDetection(__snake_case ) model.eval() lowercase : Dict = convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by YolosImageProcessor lowercase : Any = 800 if yolos_name != "yolos_ti" else 512 lowercase : List[Any] = YolosImageProcessor(format="coco_detection" , size=__snake_case ) lowercase : Any = image_processor(images=prepare_img() , return_tensors="pt" ) lowercase : Any = model(**__snake_case ) lowercase , lowercase : Tuple = outputs.logits, outputs.pred_boxes lowercase , lowercase : int = None, None if yolos_name == "yolos_ti": lowercase : int = torch.tensor( [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] ) lowercase : List[Any] = torch.tensor( [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] ) elif yolos_name == "yolos_s_200_pre": lowercase : List[Any] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] ) lowercase : List[Any] = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] ) elif yolos_name == "yolos_s_300_pre": lowercase : Optional[int] = torch.tensor( [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] ) lowercase : Tuple = torch.tensor( [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] ) elif yolos_name == "yolos_s_dWr": lowercase : Tuple = torch.tensor( [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] ) lowercase : List[str] = torch.tensor( [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] ) elif yolos_name == "yolos_base": lowercase : Optional[Any] = torch.tensor( 
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] ) lowercase : List[str] = torch.tensor( [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] ) else: raise ValueError(f"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __snake_case , atol=1E-4 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__snake_case ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__snake_case ) if push_to_hub: lowercase : Optional[int] = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) lowercase : Optional[Any] = model_mapping[yolos_name] image_processor.push_to_hub(__snake_case , organization="hustvl" ) model.push_to_hub(__snake_case , organization="hustvl" ) if __name__ == "__main__": _A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--yolos_name""", default="""yolos_s_200_pre""", type=str, help=( """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',""" """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'.""" ), ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _A : Any = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
361
0
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase_ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = "ylacombe/bark-small" SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ = "en_speaker_1" SCREAMING_SNAKE_CASE__ = "This is a test string" SCREAMING_SNAKE_CASE__ = "speaker_embeddings_path.json" SCREAMING_SNAKE_CASE__ = "speaker_embeddings" def lowerCAmelCase__ ( self , **UpperCAmelCase__ ): return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = BarkProcessor(tokenizer=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE__ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE__ = 35 SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 8 SCREAMING_SNAKE_CASE__ = { "semantic_prompt": np.ones(UpperCAmelCase__ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE__ = processor(text=self.input_string , voice_preset=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase__ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , "file.npz" ) np.savez(UpperCAmelCase__ , **UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = processor(text=self.input_string , voice_preset=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase__ , np.array([] ) ).tolist() ) # test loading voice preset from the hub SCREAMING_SNAKE_CASE__ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = BarkProcessor(tokenizer=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = processor(text=self.input_string ) SCREAMING_SNAKE_CASE__ = tokenizer( self.input_string , padding="max_length" , max_length=256 , 
add_special_tokens=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
711
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values _lowerCamelCase = argparse.ArgumentParser() parser.add_argument('--user', type=str, default='ubuntu') parser.add_argument('--host', type=str, default='localhost') parser.add_argument('--key_path', type=str, default=None) parser.add_argument('--instance', type=str, default='V100:1') parser.add_argument('--provider', type=str, default='cheapest') parser.add_argument('--use_spot', type=bool, default=False) parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py') _lowerCamelCase , _lowerCamelCase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('Cannot specify both BYO and on-demand cluster args') _lowerCamelCase = rh.cluster( name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path} ) else: _lowerCamelCase = rh.cluster( name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) _lowerCamelCase = args.example.rsplit('/', 1)[0] # Set up remote environment cluster.install_packages(['pip:./']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
112
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase : List[str] = logging.get_logger(__name__) __lowercase : Dict = "▁" __lowercase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"} __lowercase : Optional[int] = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } __lowercase : Any = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class _A ( _UpperCAmelCase ): """simple docstring""" UpperCamelCase_ : List[str] = VOCAB_FILES_NAMES UpperCamelCase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Any = ['''input_ids''', '''attention_mask'''] def __init__( self : str , A_ : Any , A_ : Union[str, Any]="<s>" , A_ : Any="</s>" , A_ : Any="</s>" , A_ : Union[str, Any]="<s>" , A_ : str="<unk>" , A_ : Union[str, Any]="<pad>" , A_ : str="<mask>" , A_ : Optional[Dict[str, Any]] = None , **A_ : Any , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __snake_case = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token __snake_case = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) __snake_case = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __snake_case = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __snake_case = 1 __snake_case = len(self.sp_model ) + self.fairseq_offset __snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Union[str, Any]: __snake_case = self.__dict__.copy() __snake_case = None __snake_case = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , A_ : Tuple ) -> str: __snake_case = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __snake_case = {} __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case = [self.cls_token_id] __snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def lowercase ( self : List[str] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase ( self : Any ) -> str: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def lowercase ( self : Optional[Any] ) -> Union[str, Any]: __snake_case = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase ( self : Union[str, Any] , A_ : str ) -> List[str]: return self.sp_model.encode(A_ , out_type=A_ ) def lowercase ( self : Optional[Any] , A_ : List[str] ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __snake_case = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase ( self : str , A_ : Optional[int] ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase ( self : str , A_ : Dict ) -> Any: __snake_case = ''''''.join(A_ ).replace(A_ , ''' ''' ).strip() return out_string def lowercase ( self : Any , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return __snake_case = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , '''wb''' ) as fi: __snake_case = 
self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
564
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __lowercase : Tuple = logging.get_logger(__name__) class _A ( _UpperCAmelCase ): """simple docstring""" UpperCamelCase_ : List[str] = ['''pixel_values'''] def __init__( self : Optional[int] , A_ : bool = True , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , **A_ : str , ) -> None: super().__init__(**A_ ) __snake_case = size if size is not None else {'''shortest_edge''': 256} __snake_case = get_size_dict(A_ , default_to_square=A_ ) __snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __snake_case = get_size_dict(A_ , param_name='''crop_size''' ) __snake_case = do_resize __snake_case = size __snake_case = resample __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase ( self : List[str] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : List[Any] , ) -> np.ndarray: __snake_case = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __snake_case = get_resize_output_image_size(A_ , size=size['''shortest_edge'''] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def lowercase ( self : Tuple , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Union[str, Any] , ) -> np.ndarray: __snake_case = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}" ) return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ ) def lowercase ( self : Optional[int] , A_ : np.ndarray , A_ : float , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : int ) -> np.ndarray: return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def lowercase ( self : Tuple , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray: return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def lowercase ( self : List[Any] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : Dict[str, int] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A_ : Dict , ) -> Optional[Any]: __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = size if size is not None else self.size __snake_case = get_size_dict(A_ , default_to_square=A_ ) __snake_case = resample if resample is not None else self.resample __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(A_ , param_name='''crop_size''' ) __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__snake_case = [to_numpy_array(A_ ) for image in images] if do_resize: __snake_case = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: __snake_case = [self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: __snake_case = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: __snake_case = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] __snake_case = [to_channel_dimension_format(A_ , A_ ) for image in images] __snake_case = {'''pixel_values''': images} return BatchFeature(data=A_ , tensor_type=A_ ) def lowercase ( self : List[str] , A_ : Optional[Any] , A_ : List[Tuple] = None ) -> List[Any]: __snake_case = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(A_ ): __snake_case = target_sizes.numpy() __snake_case = [] for idx in range(len(A_ ) ): __snake_case = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ ) __snake_case = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: __snake_case = logits.argmax(dim=1 ) __snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
564
1
ROMAN = [
    (1000, 'M'),
    (900, 'CM'),
    (500, 'D'),
    (400, 'CD'),
    (100, 'C'),
    (90, 'XC'),
    (50, 'L'),
    (40, 'XL'),
    (10, 'X'),
    (9, 'IX'),
    (5, 'V'),
    (4, 'IV'),
    (1, 'I'),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string into its integer value."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller symbol before a larger one (e.g. the "I" in "IX") is subtractive.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer into its Roman numeral representation."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
182
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 snake_case : List[str] = { 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.01), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): @classmethod def _lowercase ( cls : str): A__ : int = TOKEN HfFolder.save_token(_A) @classmethod def _lowercase ( cls : Optional[Any]): try: delete_repo(token=cls._token , repo_id="test-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-config-org") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-config") except HTTPError: pass def _lowercase ( self : Dict): A__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub("test-config" , use_auth_token=self._token) A__ : Dict = BertConfig.from_pretrained(F'{USER}/test-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A)) # Reset repo delete_repo(token=self._token , repo_id="test-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A , repo_id="test-config" , push_to_hub=_A , use_auth_token=self._token) A__ : Tuple = BertConfig.from_pretrained(F'{USER}/test-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A)) def _lowercase ( self : List[Any]): A__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token) A__ : Union[str, Any] = BertConfig.from_pretrained("valid_org/test-config-org") for k, 
v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _A , repo_id="valid_org/test-config-org" , push_to_hub=_A , use_auth_token=self._token) A__ : List[str] = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A)) def _lowercase ( self : Optional[int]): CustomConfig.register_for_auto_class() A__ : Optional[Any] = CustomConfig(attribute=42) config.push_to_hub("test-dynamic-config" , use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"}) A__ : Optional[Any] = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=_A) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , "CustomConfig") self.assertEqual(new_config.attribute , 42) class lowerCAmelCase__ ( unittest.TestCase ): def _lowercase ( self : Optional[int]): A__ : List[str] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated A__ : Optional[int] = c.n_embd + 1 # int A__ : Tuple = c.resid_pdrop + 1.0 # float A__ : Any = not c.scale_attn_weights # bool A__ : List[Any] = c.summary_type + "foo" # str c.update_from_string( F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}') self.assertEqual(_A , c.n_embd , "mismatch for key: n_embd") self.assertEqual(_A , c.resid_pdrop , "mismatch for key: resid_pdrop") self.assertEqual(_A , c.scale_attn_weights , "mismatch for key: scale_attn_weights") self.assertEqual(_A , c.summary_type , "mismatch for key: summary_type") def _lowercase ( self : Optional[int]): A__ : Any = PretrainedConfig() A__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( _A , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]) A__ : Optional[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(_A , _A)] if len(_A) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" F' {", ".join(_A)}.') def _lowercase ( self : List[str]): with self.assertRaises(_A): # config is in subfolder, the following should not work without specifying the subfolder A__ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder") A__ : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert") self.assertIsNotNone(_A) def _lowercase ( self : List[str]): # A mock response for an HTTP head request to emulate server down A__ : Tuple = mock.Mock() A__ : Dict = 500 A__ : Union[str, Any] = {} A__ : List[Any] = HTTPError A__ : Optional[int] = {} # Download this model to make sure it's in the cache. A__ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch("requests.Session.request" , return_value=_A) as mock_head: A__ : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def _lowercase ( self : Optional[int]): # This test is for deprecated behavior and can be removed in v5 A__ : List[str] = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json") def _lowercase ( self : Union[str, Any]): A__ : Union[str, Any] = AutoConfig.from_pretrained("bert-base-cased") A__ : str = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_A) A__ : Dict = 2 json.dump(configuration.to_dict() , open(os.path.join(_A , "config.4.0.0.json") , "w")) # This should pick the new configuration file as the version of Transformers is > 4.0.0 A__ : Any = AutoConfig.from_pretrained(_A) self.assertEqual(new_configuration.hidden_size , 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 A__ : str = ["config.42.0.0.json"] A__ : Optional[int] = 768 configuration.save_pretrained(_A) shutil.move(os.path.join(_A , "config.4.0.0.json") , os.path.join(_A , "config.42.0.0.json")) A__ : Optional[int] = AutoConfig.from_pretrained(_A) self.assertEqual(new_configuration.hidden_size , 768) def _lowercase ( self : Optional[int]): # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. A__ : List[str] = "hf-internal-testing/test-two-configs" import transformers as new_transformers A__ : int = "v4.0.0" A__ , A__ : str = new_transformers.models.auto.AutoConfig.from_pretrained( _A , return_unused_kwargs=_A) self.assertEqual(new_configuration.hidden_size , 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(_A , {}) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers A__ : List[Any] = "v3.0.0" A__ : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_A) self.assertEqual(old_configuration.hidden_size , 768)
182
1
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
383
from manim import * class lowerCAmelCase_ ( _lowercase ): """simple docstring""" def __lowercase( self ) -> Optional[Any]: __UpperCamelCase = Rectangle(height=0.5 , width=0.5 ) __UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) __UpperCamelCase = [mem.copy() for i in range(6 )] __UpperCamelCase = [mem.copy() for i in range(6 )] __UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) __UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) __UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) __UpperCamelCase = Text('CPU' , font_size=24 ) __UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = [mem.copy() for i in range(1 )] __UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) __UpperCamelCase = Text('GPU' , font_size=24 ) __UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) gpu.align_to(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) gpu.set_x(gpu.get_x() - 1 ) self.add(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = [mem.copy() for i in range(6 )] __UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) __UpperCamelCase = Text('Model' , font_size=24 ) __UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) model.move_to([3, -1.0, 0] ) self.play( Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , ) __UpperCamelCase = MarkupText( f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , ) __UpperCamelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __UpperCamelCase = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(_SCREAMING_SNAKE_CASE ) , Write(_SCREAMING_SNAKE_CASE ) ) self.add(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = [] __UpperCamelCase = [] __UpperCamelCase = [] for i, rect in enumerate(_SCREAMING_SNAKE_CASE ): __UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 ) cpu_target.move_to(_SCREAMING_SNAKE_CASE ) cpu_target.generate_target() __UpperCamelCase = 0.4_6 / 4 __UpperCamelCase = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 ) cpu_targs.append(_SCREAMING_SNAKE_CASE ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) ) second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) ) self.play(*_SCREAMING_SNAKE_CASE ) self.play(*_SCREAMING_SNAKE_CASE ) self.wait()
383
1
"""simple docstring""" import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[Any] ): A = checkpoint A = {} A = vae_state_dict['encoder.conv_in.weight'] A = vae_state_dict['encoder.conv_in.bias'] A = vae_state_dict['encoder.conv_out.weight'] A = vae_state_dict['encoder.conv_out.bias'] A = vae_state_dict['encoder.norm_out.weight'] A = vae_state_dict['encoder.norm_out.bias'] A = vae_state_dict['decoder.conv_in.weight'] A = vae_state_dict['decoder.conv_in.bias'] A = vae_state_dict['decoder.conv_out.weight'] A = vae_state_dict['decoder.conv_out.bias'] A = vae_state_dict['decoder.norm_out.weight'] A = vae_state_dict['decoder.norm_out.bias'] A = vae_state_dict['quant_conv.weight'] A = vae_state_dict['quant_conv.bias'] A = vae_state_dict['post_quant_conv.weight'] A = vae_state_dict['post_quant_conv.bias'] # Retrieves the keys for the encoder down blocks only A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} ) A = { layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the decoder up blocks only A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} ) A = { layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(snake_case__ ) } for i in range(snake_case__ ): A = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key] if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: A = vae_state_dict.pop( F'encoder.down.{i}.downsample.conv.weight' ) A = vae_state_dict.pop( F'encoder.down.{i}.downsample.conv.bias' ) A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'encoder.mid.block' in key] A = 2 for i in range(1 , num_mid_res_blocks + 1 ): A = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key] A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'encoder.mid.attn' in key] A = renew_vae_attention_paths(snake_case__ ) A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) conv_attn_to_linear(snake_case__ ) for i in range(snake_case__ ): A = num_up_blocks - 1 - i A = [ key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key ] if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: A = vae_state_dict[ F'decoder.up.{block_id}.upsample.conv.weight' ] A = vae_state_dict[ F'decoder.up.{block_id}.upsample.conv.bias' ] A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , 
additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'decoder.mid.block' in key] A = 2 for i in range(1 , num_mid_res_blocks + 1 ): A = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key] A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'decoder.mid.attn' in key] A = renew_vae_attention_paths(snake_case__ ) A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) conv_attn_to_linear(snake_case__ ) return new_checkpoint def _snake_case ( snake_case__ : str , snake_case__ : str , ): # Only support V1 A = requests.get( ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' ) A = io.BytesIO(r.content ) A = OmegaConf.load(snake_case__ ) A = 512 A = 'cuda' if torch.cuda.is_available() else 'cpu' if checkpoint_path.endswith('safetensors' ): from safetensors import safe_open A = {} with safe_open(snake_case__ , framework='pt' , device='cpu' ) as f: for key in f.keys(): A = f.get_tensor(snake_case__ ) else: A = torch.load(snake_case__ , map_location=snake_case__ )['state_dict'] # Convert the VAE model. A = create_vae_diffusers_config(snake_case__ , image_size=snake_case__ ) A = custom_convert_ldm_vae_checkpoint(snake_case__ , snake_case__ ) A = AutoencoderKL(**snake_case__ ) vae.load_state_dict(snake_case__ ) vae.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') _lowercase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' ) download_parser.add_argument( '--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,) download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' ) download_parser.set_defaults(func=A_ ) def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]: A = model A = cache A = force A = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
22
1
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __UpperCAmelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __UpperCAmelCase = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) __UpperCAmelCase = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions __UpperCAmelCase = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) __UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image) __UpperCAmelCase = np.expand_dims(test_image, axis=0) __UpperCAmelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __UpperCAmelCase = 'Normal' if result[0][0] == 1: __UpperCAmelCase = 'Abnormality detected'
65
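Since the final Dense layer uses a sigmoid, predict() returns a probability in [0, 1]; classifying by thresholding is more robust than testing for exact 0 or 1, which a float output almost never equals. A small sketch of that decision step (labels and threshold are illustrative):

import numpy as np


def label_from_sigmoid(prob: float, threshold: float = 0.5) -> str:
    # Decide by threshold rather than comparing a probability to 0/1 exactly
    return "Abnormality detected" if prob > threshold else "Normal"


probs = np.array([0.02, 0.51, 0.97])
print([label_from_sigmoid(p) for p in probs])
# ['Normal', 'Abnormality detected', 'Abnormality detected']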
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') __UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'} __UpperCAmelCase = '>>zh<<' __UpperCAmelCase = 'Helsinki-NLP/' if is_torch_available(): __UpperCAmelCase = 'pt' elif is_tf_available(): __UpperCAmelCase = 'tf' else: __UpperCAmelCase = 'jax' @require_sentencepiece class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = MarianTokenizer snake_case_ = False snake_case_ = True def __lowercase ( self : Optional[int] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self : List[Any] ,**A : List[Any] ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Union[str, Any] ,A : Tuple ): '''simple docstring''' return ( "This is a test", "This is a test", ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = """</s>""" UpperCAmelCase__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""</s>""" ) self.assertEqual(vocab_keys[1] ,"""<unk>""" ) self.assertEqual(vocab_keys[-1] ,"""<pad>""" ) self.assertEqual(len(A ) ,9 ) def __lowercase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,9 ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A ) self.assertIsInstance(A ,A ) UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(A ,batch.input_ids[0] ) UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A ) UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )] self.assertIn("""source.spm""" ,A ) MarianTokenizer.from_pretrained(A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] 
,padding=A ,truncation=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch.input_ids.shape ,(2, 512) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) ) @slow def __lowercase ( self : Dict ): '''simple docstring''' # fmt: off UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) UpperCAmelCase__ : Any = """Tämä on testi""" UpperCAmelCase__ : int = """This is a test""" UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2] UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2] UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : int = tokenizer.decode(A ,skip_special_tokens=A ) self.assertEqual(A ,A )
65
1
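The padding/truncation behavior exercised by the tokenizer tests above — truncate each sequence to the model maximum, then pad the batch to its longest survivor — can be sketched without any library; the pad id and the 512 limit here are illustrative:

def pad_and_truncate(batch: list[list[int]], pad_id: int = 0, max_length: int = 512) -> list[list[int]]:
    # Truncate each sequence to max_length, then pad the batch to the longest survivor
    truncated = [ids[:max_length] for ids in batch]
    longest = max(len(ids) for ids in truncated)
    return [ids + [pad_id] * (longest - len(ids)) for ids in truncated]


batch = [[5] * 1000, [5, 6, 7]]
padded = pad_and_truncate(batch)
print([len(ids) for ids in padded])  # [512, 512] -- mirrors the (2, 512) shape asserted above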
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class __lowercase ( _lowerCamelCase ): """simple docstring""" def lowerCAmelCase ( self ): __UpperCamelCase : Tuple = SMALL_MODEL_IDENTIFIER __UpperCamelCase : List[str] = 'pt' __UpperCamelCase : List[str] = 'tf' def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : Union[str, Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : Optional[int] = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCamelCase ) model_tf.save_pretrained(_lowerCamelCase ) def lowerCAmelCase ( self ): __UpperCamelCase : int = 'mock_framework' # Framework provided - return whatever the user provides __UpperCamelCase : Tuple = FeaturesManager.determine_framework(self.test_model , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCamelCase ) __UpperCamelCase : int = FeaturesManager.determine_framework(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCamelCase ) __UpperCamelCase : Tuple = FeaturesManager.determine_framework(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def lowerCAmelCase ( self ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCamelCase ) __UpperCamelCase : Any = FeaturesManager.determine_framework(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCamelCase ) __UpperCamelCase : int = FeaturesManager.determine_framework(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_lowerCamelCase ): __UpperCamelCase : Union[str, Any] = FeaturesManager.determine_framework(_lowerCamelCase ) def lowerCAmelCase ( self ): __UpperCamelCase : Any = MagicMock(return_value=_lowerCamelCase ) with patch('transformers.onnx.features.is_tf_available' , _lowerCamelCase ): __UpperCamelCase : Optional[int] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCamelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __UpperCamelCase : List[Any] = MagicMock(return_value=_lowerCamelCase ) with patch('transformers.onnx.features.is_torch_available' , _lowerCamelCase ): __UpperCamelCase : List[str] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCamelCase , self.framework_tf ) # Both in environment -> use PyTorch __UpperCamelCase : Any = MagicMock(return_value=_lowerCamelCase ) __UpperCamelCase : Dict = MagicMock(return_value=_lowerCamelCase ) with patch('transformers.onnx.features.is_tf_available' , _lowerCamelCase ), patch( 'transformers.onnx.features.is_torch_available' , _lowerCamelCase ): __UpperCamelCase : Union[str, Any] = 
FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCamelCase , self.framework_pt ) # Both not in environment -> raise error __UpperCamelCase : Tuple = MagicMock(return_value=_lowerCamelCase ) __UpperCamelCase : str = MagicMock(return_value=_lowerCamelCase ) with patch('transformers.onnx.features.is_tf_available' , _lowerCamelCase ), patch( 'transformers.onnx.features.is_torch_available' , _lowerCamelCase ): with self.assertRaises(_lowerCamelCase ): __UpperCamelCase : Any = FeaturesManager.determine_framework(self.test_model )
287
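The framework-detection tests above rely on unittest.mock.patch to swap a module-level availability probe for a MagicMock with a fixed return value, so both "torch installed" and "torch missing" environments can be simulated in one process. A minimal sketch of the pattern on a toy object (everything here is hypothetical, not the transformers API):

import types
from unittest.mock import MagicMock, patch

# A stand-in for a module exposing an availability probe
features = types.SimpleNamespace(is_torch_available=lambda: True)


def determine_framework() -> str:
    return "pt" if features.is_torch_available() else "tf"


print(determine_framework())  # pt

# Temporarily pretend torch is missing
with patch.object(features, "is_torch_available", MagicMock(return_value=False)):
    print(determine_framework())  # tf

print(determine_framework())  # pt again -- the patch is undone on exit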
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
287
1
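__init__.py files like the one above use the _LazyModule pattern: exported names are listed in _import_structure and only imported on first attribute access, while the TYPE_CHECKING branch gives static checkers the real imports. PEP 562's module-level __getattr__ achieves the same effect in plain Python; the following is a sketch of the idea under hypothetical names, not the _LazyModule implementation itself:

# lazy_pkg/__init__.py -- hypothetical package demonstrating lazy attribute access
import importlib
from typing import TYPE_CHECKING

_import_structure = {"heavy_module": ["HeavyClass"]}

if TYPE_CHECKING:
    # Static type checkers see the real import; at runtime this branch is skipped
    from .heavy_module import HeavyClass


def __getattr__(name: str):
    # Called only when `name` is not found through normal lookup (PEP 562)
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")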
'''simple docstring'''
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
526
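What get_imports must do — find top-level module dependencies while skipping imports guarded by try/except, where optional dependencies are imported deliberately — can be approximated with the stdlib ast module. This is a rough sketch of the idea, not the transformers implementation:

import ast


def top_level_imports(source: str) -> list[str]:
    found: list[str] = []

    def visit(node: ast.AST) -> None:
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.Try):
                # Never descend into try/except blocks: optional imports live there
                continue
            if isinstance(child, ast.Import):
                found.extend(alias.name.split(".")[0] for alias in child.names)
            elif isinstance(child, ast.ImportFrom) and child.module and child.level == 0:
                found.append(child.module.split(".")[0])
            else:
                visit(child)

    visit(ast.parse(source))
    return sorted(set(found))


print(top_level_imports("import os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"))
# ['os']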
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase: Dict = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Optional[int] = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: List[Any] = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys lowerCAmelCase: Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
526
1
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase__ = 16 lowercase__ = 32 def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ = 16 , UpperCAmelCase_ = "bert-base-cased" ): UpperCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) UpperCAmelCase : Tuple = load_dataset('glue' , 'mrpc' ) def tokenize_function(UpperCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase : Optional[Any] = datasets.map( UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(UpperCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
UpperCAmelCase : Any = DataLoader( tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ ) UpperCAmelCase : Union[str, Any] = DataLoader( tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ ) return train_dataloader, eval_dataloader def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): # Initialize accelerator UpperCAmelCase : List[str] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase : Any = config['lr'] UpperCAmelCase : Tuple = int(config['num_epochs'] ) UpperCAmelCase : List[Any] = int(config['seed'] ) UpperCAmelCase : Tuple = int(config['batch_size'] ) UpperCAmelCase : str = args.model_name_or_path set_seed(UpperCAmelCase_ ) UpperCAmelCase : Optional[Any] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ ) # Instantiate optimizer UpperCAmelCase : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase : List[str] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase : int = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: UpperCAmelCase : str = 1 UpperCAmelCase : List[str] = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , ) else: UpperCAmelCase : List[Any] = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase : Tuple = accelerator.prepare( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase : Optional[Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase : Optional[int] = 0 # Now we train the model UpperCAmelCase : Union[str, Any] = evaluate.load('glue' , 'mrpc' ) UpperCAmelCase : str = 0 UpperCAmelCase : int = {} for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ): model.train() for step, batch in enumerate(UpperCAmelCase_ ): UpperCAmelCase : Optional[Any] = model(**UpperCAmelCase_ ) UpperCAmelCase : List[Any] = outputs.loss UpperCAmelCase : int = loss / gradient_accumulation_steps accelerator.backward(UpperCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() UpperCAmelCase : str = 0 for step, batch in enumerate(UpperCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase : Any = model(**UpperCAmelCase_ ) UpperCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase : Dict = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCAmelCase_ ) - 1: UpperCAmelCase : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , ) UpperCAmelCase : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase_ ) UpperCAmelCase : Any = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: UpperCAmelCase : Optional[int] = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) def UpperCamelCase( ): UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , ) parser.add_argument( '--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=UpperCAmelCase_ , default=3 , help='Number of train epochs.' , ) UpperCAmelCase : str = parser.parse_args() UpperCAmelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": main()
702
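The training loop above divides the loss by gradient_accumulation_steps and only steps the optimizer every N micro-batches, which simulates a larger effective batch size in the same memory budget. The core of that pattern in plain PyTorch, with a toy model and random data standing in for the real ones:

import torch
from torch import nn

model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]

for step, (x, y) in enumerate(batches):
    loss = nn.functional.mse_loss(model(x), y)
    # Scale so the accumulated gradient matches one big batch's average
    (loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()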
'''simple docstring'''
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    '''simple docstring'''

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key and drop *that* key's
                # reference, not x's (x is not in the cache yet)
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
695
0
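The deque-plus-set design above makes refer O(n), because deque.remove scans the queue on every recency bump; an OrderedDict gives the same recency ordering with O(1) moves. A compact alternative sketch that reproduces the same final order as the assert above:

from collections import OrderedDict


class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # O(1) recency bump
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[key] = None


cache = LRUCacheOD(4)
for key in ["A", 2, 3, "A", 4, 5]:
    cache.refer(key)
print(list(reversed(cache.store)))  # [5, 4, 'A', 3] -- same order as above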
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Tuple=13 , _snake_case : Tuple=7 , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Tuple=True , _snake_case : List[str]=True , _snake_case : Optional[int]=True , _snake_case : Any=False , _snake_case : Optional[Any]=False , _snake_case : List[Any]=False , _snake_case : Union[str, Any]=2 , _snake_case : Optional[Any]=99 , _snake_case : Union[str, Any]=0 , _snake_case : Union[str, Any]=32 , _snake_case : Union[str, Any]=5 , _snake_case : List[str]=4 , _snake_case : Any=0.1 , _snake_case : List[Any]=0.1 , _snake_case : Any=512 , _snake_case : int=2 , _snake_case : Tuple=0.02 , _snake_case : Dict=2 , _snake_case : int=4 , _snake_case : List[str]="last" , _snake_case : Optional[Any]=True , _snake_case : Optional[int]=None , _snake_case : List[str]=0 , ) -> Dict: SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = bos_token_id def lowerCAmelCase_ ( self : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = 
ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase_ ( self : Dict ) -> List[Any]: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowerCAmelCase_ ( self : List[Any] , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any , ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = XLMModel(config=_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = model(_snake_case , lengths=_snake_case , langs=_snake_case ) SCREAMING_SNAKE_CASE__ = model(_snake_case , langs=_snake_case ) SCREAMING_SNAKE_CASE__ = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : str , _snake_case : Optional[Any] , ) -> Any: SCREAMING_SNAKE_CASE__ = XLMWithLMHeadModel(_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Dict , _snake_case : Dict , _snake_case : str , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : List[str] , ) -> Tuple: SCREAMING_SNAKE_CASE__ = XLMForQuestionAnsweringSimple(_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = model(_snake_case ) SCREAMING_SNAKE_CASE__ = model(_snake_case , start_positions=_snake_case , end_positions=_snake_case ) SCREAMING_SNAKE_CASE__ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : int , _snake_case : int , _snake_case : str , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Tuple , ) -> Tuple: SCREAMING_SNAKE_CASE__ = XLMForQuestionAnswering(_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = model(_snake_case ) SCREAMING_SNAKE_CASE__ = model( _snake_case , 
start_positions=_snake_case , end_positions=_snake_case , cls_index=_snake_case , is_impossible=_snake_case , p_mask=_snake_case , ) SCREAMING_SNAKE_CASE__ = model( _snake_case , start_positions=_snake_case , end_positions=_snake_case , cls_index=_snake_case , is_impossible=_snake_case , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(_snake_case , start_positions=_snake_case , end_positions=_snake_case ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCAmelCase_ ( self : List[str] , _snake_case : Any , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Dict , ) -> Any: SCREAMING_SNAKE_CASE__ = XLMForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = model(_snake_case ) SCREAMING_SNAKE_CASE__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : Any , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Any , _snake_case : str , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : int , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = XLMForTokenClassification(_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , _snake_case : Any , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , ) -> Tuple: SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = XLMForMultipleChoice(config=_snake_case ) model.to(_snake_case ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( 
SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) a = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable a = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase_ ( self : Tuple , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : str ) -> Dict: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase_ ( self : Dict , _snake_case : Any , _snake_case : str , _snake_case : Optional[Any]=False ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_snake_case ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_snake_case ) return inputs_dict def lowerCAmelCase_ ( self : int ) -> int: SCREAMING_SNAKE_CASE__ = XLMModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=_snake_case , emb_dim=37 ) def lowerCAmelCase_ ( self : Optional[int] ) -> List[Any]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : List[Any] ) -> str: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_snake_case ) def lowerCAmelCase_ ( self : Union[str, Any] ) -> Dict: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_snake_case ) def lowerCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_snake_case ) def lowerCAmelCase_ ( self : int ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_snake_case ) def lowerCAmelCase_ ( self : Any ) -> Dict: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_snake_case ) def lowerCAmelCase_ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_snake_case ) def lowerCAmelCase_ ( self : str ) -> List[Any]: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_snake_case ) def lowerCAmelCase_ ( self : str , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Any=False , _snake_case : Tuple=1 ) -> Optional[Any]: self.assertIsInstance(_snake_case , _snake_case ) self.assertListEqual( [isinstance(_snake_case , _snake_case ) for iter_attentions in attentions] , [True] * len(_snake_case ) ) self.assertEqual(len(_snake_case ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_snake_case ): # adds PAD dummy token SCREAMING_SNAKE_CASE__ = min_length + idx + 1 SCREAMING_SNAKE_CASE__ = min_length + idx + 1 SCREAMING_SNAKE_CASE__ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_snake_case ) ) def lowerCAmelCase_ ( self : Any , _snake_case : Any , _snake_case : Dict , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : str , _snake_case : Any=False , _snake_case : Tuple=1 ) -> Dict: self.assertIsInstance(_snake_case , _snake_case ) self.assertListEqual( [isinstance(_snake_case , _snake_case ) for iter_hidden_states in hidden_states] , [True] * len(_snake_case ) , ) self.assertEqual(len(_snake_case ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_snake_case ): # adds PAD dummy token SCREAMING_SNAKE_CASE__ = min_length + idx + 1 SCREAMING_SNAKE_CASE__ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_snake_case ) , ) pass @slow def lowerCAmelCase_ ( self : List[str] ) -> Any: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = XLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @require_torch class lowerCamelCase (unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Any ) -> List[str]: SCREAMING_SNAKE_CASE__ = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(_snake_case ) SCREAMING_SNAKE_CASE__ = torch.tensor([[14, 447]] , dtype=torch.long , device=_snake_case ) # the president SCREAMING_SNAKE_CASE__ = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference SCREAMING_SNAKE_CASE__ = model.generate(_snake_case , do_sample=_snake_case ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _snake_case )
159
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> str: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) SCREAMING_SNAKE_CASE__ = precision SCREAMING_SNAKE_CASE__ = ceil(precision / 14 ) SCREAMING_SNAKE_CASE__ = 426_880 * Decimal(10_005 ).sqrt() SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 13_591_409 SCREAMING_SNAKE_CASE__ = Decimal(__UpperCAmelCase ) for k in range(1 , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCAmelCase ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _A = 5_0 print(F'The first {n} digits of pi is: {pi(n)}')
159
1
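For reference, the series the constants in pi() encode is the Chudnovsky formula,

\[
\frac{1}{\pi} \;=\; 12 \sum_{k=0}^{\infty} \frac{(-1)^k\,(6k)!\,\bigl(13591409 + 545140134\,k\bigr)}{(3k)!\,(k!)^{3}\,640320^{3k + 3/2}}
\]

The code folds the prefactor into constant_term = 426880 * Decimal(10005).sqrt(), since 640320^{3/2}/12 = 426880·√10005, and multiplies exponential_term by −640320³ = −262537412640768000 each iteration. Each term contributes roughly 14 correct digits, which is why the loop runs ceil(precision / 14) times.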
'''simple docstring'''
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
709
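A quick worked check of the mod-23 checksum: for the digits 12345678, 12345678 % 23 == 14, and index 14 of "TRWAGMYFPDXBNJZSQVHLCKE" is 'Z', so "12345678Z" validates while any other letter fails:

LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"

number = 12345678
print(number % 23)         # 14
print(LOOKUP_LETTERS[14])  # Z
# so is_spain_national_id("12345678Z") -> True, is_spain_national_id("12345678A") -> False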
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase__ : str = { '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''], '''processing_speech_to_text''': ['''Speech2TextProcessor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = ['''Speech2TextTokenizer'''] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = ['''Speech2TextFeatureExtractor'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : List[str] = [ '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSpeech2TextForConditionalGeneration''', '''TFSpeech2TextModel''', '''TFSpeech2TextPreTrainedModel''', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Speech2TextForConditionalGeneration''', '''Speech2TextModel''', '''Speech2TextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
338
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) a_ : int = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Any = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Any = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
623
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def _a (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _a (self ): '''simple docstring''' lowerCamelCase = 1 lowerCamelCase = 3 lowerCamelCase = (32, 32) lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _a (self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _a (self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _a (self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) return CLIPTextModel(__a ) def _a (self ): '''simple docstring''' lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase = self.dummy_cond_unet_upscale lowerCamelCase = DDPMScheduler() lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" ) lowerCamelCase = self.dummy_vae lowerCamelCase = self.dummy_text_encoder lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk lowerCamelCase = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , ) lowerCamelCase = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase = "A painting of a squirrel eating a burger" lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) lowerCamelCase = output.images lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] lowerCamelCase = 
image[0, -3:, -3:, -1] lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] lowerCamelCase = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) lowerCamelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _a (self ): '''simple docstring''' lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase = self.dummy_cond_unet_upscale lowerCamelCase = DDPMScheduler() lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" ) lowerCamelCase = self.dummy_vae lowerCamelCase = self.dummy_text_encoder lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk lowerCamelCase = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , ) lowerCamelCase = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase = "A painting of a squirrel eating a burger" lowerCamelCase = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) lowerCamelCase = output.images assert image.shape[0] == 2 lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) lowerCamelCase = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _a (self ): '''simple docstring''' lowerCamelCase = self.dummy_cond_unet_upscale lowerCamelCase = DDPMScheduler() lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" ) lowerCamelCase = self.dummy_vae lowerCamelCase = self.dummy_text_encoder lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 lowerCamelCase = unet.half() lowerCamelCase = text_encoder.half() # make sure here that pndm scheduler skips prk lowerCamelCase = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , ) lowerCamelCase = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase = "A painting of a squirrel eating a burger" lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images lowerCamelCase = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def _a (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self ): '''simple docstring''' lowerCamelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" 
"/sd2-upscale/low_res_cat.png" ) lowerCamelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) lowerCamelCase = "stabilityai/stable-diffusion-x4-upscaler" lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() lowerCamelCase = "a cat sitting on a park bench" lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) lowerCamelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _a (self ): '''simple docstring''' lowerCamelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) lowerCamelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) lowerCamelCase = "stabilityai/stable-diffusion-x4-upscaler" lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() lowerCamelCase = "a cat sitting on a park bench" lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) lowerCamelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _a (self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) lowerCamelCase = "stabilityai/stable-diffusion-x4-upscaler" lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase = "a cat sitting on a park bench" lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) lowerCamelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
623
1
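For reference, a minimal usage sketch of the pipeline the tests above exercise. The checkpoint id, prompt, seed, and the 4x output-size relationship come from the test code; the local file names are illustrative assumptions, and a CUDA device is assumed.

import torch
from diffusers import StableDiffusionUpscalePipeline
from PIL import Image

# Load the same checkpoint the slow tests use, in fp16 to reduce memory.
pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()  # lower peak memory at some speed cost

low_res = Image.open("low_res_cat.png").convert("RGB")  # illustrative input file
generator = torch.manual_seed(0)  # fixed seed, as in the tests
upscaled = pipe(
    prompt="a cat sitting on a park bench",
    image=low_res,
    generator=generator,
).images[0]

# The output is 4x the input resolution, matching the
# `low_res_image.size[0] * 4` assertions in the tests above.
assert upscaled.size == (low_res.size[0] * 4, low_res.size[1] * 4)
upscaled.save("upscaled_cat.png")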
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# submodules are imported lazily, only when one of their attributes is accessed
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
"""simple docstring""" import comet # From: unbabel-comet import torch import datasets A__ : int = datasets.logging.get_logger(__name__) A__ : Optional[Any] = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n' A__ : Union[str, Any] = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n' A__ : int = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def lowercase_ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence''' ), '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowercase_ ( self , A_ ) -> Dict: """simple docstring""" if self.config_name == "default": _lowercase: str = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: _lowercase: Optional[Any] = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowercase_ ( self , A_ , A_ , A_ , A_=None , A_=False ) -> List[str]: """simple docstring""" if gpus is None: _lowercase: Union[str, Any] = 1 if torch.cuda.is_available() else 0 _lowercase: Any = {'''src''': sources, '''mt''': predictions, '''ref''': references} _lowercase: int = [dict(zip(A_ , A_ ) ) for t in zip(*data.values() )] _lowercase , _lowercase: Tuple = self.scorer.predict(A_ , gpus=A_ , progress_bar=A_ ) return {"mean_score": mean_score, "scores": scores}
272
0
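The MLuke `__init__` above relies on transformers' `_LazyModule`. As a standalone illustration of the idea only (this is not the transformers implementation, and all names are illustrative), a minimal lazy module can be written with the standard library:

import importlib
import types


class LazyModule(types.ModuleType):
    """Import submodules only when one of their attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the potentially heavy import happens here, on first access only
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

Assigning such an object to `sys.modules[__name__]`, as the snippet above does with `_LazyModule`, defers the sentencepiece-dependent import until `MLukeTokenizer` is actually requested.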
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a in the range [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
588
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Peel increasing "strands" off ``arr`` and merge them into ``solution``."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution

    sublist = [arr.pop(0)]
    # popping while enumerating skips elements; the recursive call below
    # picks up whatever is left in ``arr``
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
588
1
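A short demonstration of `strand_sort` as reconstructed above; note that the function consumes its input list in place while building the sorted result:

data = [10, 5, 30, 40, 2, 4, 9]
print(strand_sort(data))       # [2, 4, 5, 9, 10, 30, 40]
print(data)                    # [] -- the input list has been emptied

print(strand_sort([3, 1, 3]))  # duplicates survive: [1, 3, 3]
print(strand_sort([]))         # [] -- empty input returns the empty solution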
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 40 -> "0b101000"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)  # prepend the least-significant bit
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
708
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) lowercase__ = logging.getLogger(__name__) @dataclass(frozen=_snake_case ) class A_ : '''simple docstring''' UpperCAmelCase_ : str UpperCAmelCase_ : str UpperCAmelCase_ : Optional[str] = None UpperCAmelCase_ : Optional[str] = None UpperCAmelCase_ : Optional[str] = None @dataclass(frozen=_snake_case ) class A_ : '''simple docstring''' UpperCAmelCase_ : List[int] UpperCAmelCase_ : Optional[List[int]] = None UpperCAmelCase_ : Optional[List[int]] = None UpperCAmelCase_ : Optional[Union[int, float]] = None UpperCAmelCase_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class A_ ( _snake_case ): '''simple docstring''' UpperCAmelCase_ : List[InputFeatures] def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]: UpperCAmelCase : Dict = hans_processors[task]() UpperCAmelCase : List[Any] = os.path.join( lowercase_ , 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , ) UpperCAmelCase : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1] UpperCAmelCase : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
UpperCAmelCase : int = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase : Tuple = torch.load(lowercase_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase : int = ( processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ ) ) logger.info('Training examples: %s' , len(lowercase_ ) ) UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) logger.info('Saving features into cached file %s' , lowercase_ ) torch.save(self.features , lowercase_ ) def __len__( self : Union[str, Any] ) -> str: return len(self.features ) def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures: return self.features[i] def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]: return self.label_list if is_tf_available(): import tensorflow as tf class A_ : '''simple docstring''' UpperCAmelCase_ : List[InputFeatures] def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]: UpperCAmelCase : int = hans_processors[task]() UpperCAmelCase : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1] UpperCAmelCase : Any = label_list UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ ) UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ): if ex_index % 10_000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator( lowercase_ , ( { 'example_id': tf.intaa, 'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa, }, tf.intaa, ) , ( { 'example_id': tf.TensorShape([] ), 'input_ids': tf.TensorShape([None, None] ), 'attention_mask': tf.TensorShape([None, None] ), 'token_type_ids': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: return self.dataset def __len__( self : Tuple ) -> Optional[Any]: return len(self.features ) def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures: return self.features[i] def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: return self.label_list class A_ ( _snake_case ): '''simple docstring''' def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any: return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' ) def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]: return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' ) def UpperCAmelCase_ ( self : str ) -> Optional[int]: return ["contradiction", 
"entailment", "neutral"] def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict: UpperCAmelCase : Union[str, Any] = [] for i, line in enumerate(lowercase_ ): if i == 0: continue UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0]) UpperCAmelCase : Tuple = line[5] UpperCAmelCase : Dict = line[6] UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7] UpperCAmelCase : Optional[Any] = line[0] examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) ) return examples def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ): UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )} UpperCAmelCase : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ): if ex_index % 1_00_00 == 0: logger.info('Writing example %d' % (ex_index) ) UpperCAmelCase : int = tokenizer( example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , ) UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase : Any = int(example.pairID ) features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) ) for i, example in enumerate(examples[:5] ): logger.info('*** Example ***' ) logger.info(F"""guid: {example}""" ) logger.info(F"""features: {features[i]}""" ) return features lowercase__ = { "hans": 3, } lowercase__ = { "hans": HansProcessor, }
695
0
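A few checks of `decimal_to_binary` as reconstructed above; the results agree with Python's built-in `bin`:

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(40) == bin(40) == "0b101000"
assert decimal_to_binary(-40) == bin(-40) == "-0b101000"

try:
    decimal_to_binary(1.5)  # floats are rejected explicitly
except TypeError as err:
    print(err)  # 'float' object cannot be interpreted as an integer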
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record __lowerCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n' __lowerCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n' __lowerCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = 
super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return float((preds == labels).mean() ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="binary" ): _snake_case = simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE , average=_SCREAMING_SNAKE_CASE ) ) return { "accuracy": acc, "f1": fa, } def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = {} for id_pred, label in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}""" _snake_case = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _snake_case = [(pred, label)] _snake_case, _snake_case = [], [] for question, preds_labels in question_map.items(): _snake_case, _snake_case = zip(*_SCREAMING_SNAKE_CASE ) _snake_case = fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE , average="""macro""" ) fas.append(_SCREAMING_SNAKE_CASE ) _snake_case = int(sum(pred == label for pred, label in preds_labels ) == len(_SCREAMING_SNAKE_CASE ) ) ems.append(_SCREAMING_SNAKE_CASE ) _snake_case = float(sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) ) _snake_case = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) _snake_case = float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowercase (self ) -> int: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , ) def lowercase (self ) -> Dict: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, 
"references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase , UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(UpperCAmelCase , UpperCAmelCase , fa_avg="""macro""" ) elif self.config_name == "record": _snake_case = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] _snake_case = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(UpperCAmelCase , UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(UpperCAmelCase , UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
585
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example __lowerCAmelCase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowerCAmelCase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): _snake_case = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours _snake_case = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_SCREAMING_SNAKE_CASE ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_SCREAMING_SNAKE_CASE ) - 1: neighbour_count += cells[i + 1][j] if i < len(_SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. _snake_case = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_SCREAMING_SNAKE_CASE ) return next_generation def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = [] for _ in range(_SCREAMING_SNAKE_CASE ): # Create output image _snake_case = Image.new("""RGB""" , (len(cells[0] ), len(_SCREAMING_SNAKE_CASE )) ) _snake_case = img.load() # Save cells to image for x in range(len(_SCREAMING_SNAKE_CASE ) ): for y in range(len(cells[0] ) ): _snake_case = 255 - cells[y][x] * 255 _snake_case = (colour, colour, colour) # Save image images.append(_SCREAMING_SNAKE_CASE ) _snake_case = new_generation(_SCREAMING_SNAKE_CASE ) return images if __name__ == "__main__": __lowerCAmelCase = generate_images(GLIDER, 16) images[0].save('out.gif', save_all=True, append_images=images[1:])
585
1
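A quick sanity check of the Game of Life step function above, using the BLINKER pattern the snippet defines. This assumes the step helper is bound as `new_generation`, its name in the original source file; in the obfuscated listing above it appears as `__SCREAMING_SNAKE_CASE`.

# The blinker oscillates with period 2: vertical -> horizontal -> vertical.
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]

step1 = new_generation(BLINKER)  # assumed name, see note above
assert step1 == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]

step2 = new_generation(step1)
assert step2 == BLINKER  # back to the starting state after two steps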
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class a ( __a ): UpperCamelCase : int = """roformer""" def __init__( self , UpperCamelCase_=50_000 , UpperCamelCase_=None , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3_072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1_536 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ): super().__init__(pad_token_id=a_ , **a_ ) UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : int = hidden_size if embedding_size is None else embedding_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[Any] = rotary_value UpperCAmelCase__ : List[str] = use_cache class a ( __a ): @property def __snake_case ( self ): if self.task == "multiple-choice": UpperCAmelCase__ : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: UpperCAmelCase__ : List[str] = {0: """batch""", 1: """sequence"""} UpperCAmelCase__ : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
706
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class a ( lowercase , lowercase , lowercase ): UpperCamelCase : Any = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 50_257 , UpperCamelCase_ = 1_024 , UpperCamelCase_ = 768 , UpperCamelCase_ = 12 , UpperCamelCase_ = 12 , UpperCamelCase_ = None , UpperCamelCase_ = "gelu_new" , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 1E-5 , UpperCamelCase_ = 0.02 , UpperCamelCase_ = True , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = False , ): super().__init__() UpperCAmelCase__ : List[Any] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) UpperCAmelCase__ : Dict = prefix_inner_dim UpperCAmelCase__ : List[Any] = prefix_hidden_dim UpperCAmelCase__ : List[Any] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) UpperCAmelCase__ : Any = ( nn.Linear(self.prefix_hidden_dim , UpperCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) UpperCAmelCase__ : Union[str, Any] = GPTaConfig( vocab_size=UpperCamelCase_ , n_positions=UpperCamelCase_ , n_embd=UpperCamelCase_ , n_layer=UpperCamelCase_ , n_head=UpperCamelCase_ , n_inner=UpperCamelCase_ , activation_function=UpperCamelCase_ , resid_pdrop=UpperCamelCase_ , embd_pdrop=UpperCamelCase_ , attn_pdrop=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , initializer_range=UpperCamelCase_ , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , scale_attn_by_inverse_layer_idx=UpperCamelCase_ , reorder_and_upcast_attn=UpperCamelCase_ , ) UpperCAmelCase__ : List[str] = GPTaLMHeadModel(UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , ): UpperCAmelCase__ : Optional[Any] = self.transformer.transformer.wte(UpperCamelCase_ ) UpperCAmelCase__ : str = self.encode_prefix(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = self.decode_prefix(UpperCamelCase_ ) UpperCAmelCase__ : List[str] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: UpperCAmelCase__ : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) UpperCAmelCase__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) UpperCAmelCase__ : List[str] = self.transformer(inputs_embeds=UpperCamelCase_ , labels=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): return torch.zeros(UpperCamelCase_ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ ): return self.encode_prefix(UpperCamelCase_ ) @torch.no_grad() def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : List[str] = torch.split(UpperCamelCase_ , 1 , dim=0 ) UpperCAmelCase__ : Any = [] UpperCAmelCase__ : Optional[Any] = [] 
for feature in features: UpperCAmelCase__ : List[str] = self.decode_prefix(feature.to(UpperCamelCase_ ) ) # back to the clip feature # Only support beam search for now UpperCAmelCase__ , UpperCAmelCase__ : int = self.generate_beam( input_embeds=UpperCamelCase_ , device=UpperCamelCase_ , eos_token_id=UpperCamelCase_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) UpperCAmelCase__ : Optional[int] = torch.stack(UpperCamelCase_ ) UpperCAmelCase__ : Any = torch.stack(UpperCamelCase_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __snake_case ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = 5 , UpperCamelCase_ = 67 , UpperCamelCase_ = 1.0 , UpperCamelCase_ = None , ): UpperCAmelCase__ : Optional[int] = eos_token_id UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Union[str, Any] = None UpperCAmelCase__ : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.int ) UpperCAmelCase__ : str = torch.zeros(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.bool ) if input_embeds is not None: UpperCAmelCase__ : List[Any] = input_embeds else: UpperCAmelCase__ : Union[str, Any] = self.transformer.transformer.wte(UpperCamelCase_ ) for i in range(UpperCamelCase_ ): UpperCAmelCase__ : Tuple = self.transformer(inputs_embeds=UpperCamelCase_ ) UpperCAmelCase__ : Dict = outputs.logits UpperCAmelCase__ : List[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) UpperCAmelCase__ : int = logits.softmax(-1 ).log() if scores is None: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = logits.topk(UpperCamelCase_ , -1 ) UpperCAmelCase__ : List[Any] = generated.expand(UpperCamelCase_ , *generated.shape[1:] ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: UpperCAmelCase__ : Dict = next_tokens else: UpperCAmelCase__ : Optional[int] = tokens.expand(UpperCamelCase_ , *tokens.shape[1:] ) UpperCAmelCase__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: UpperCAmelCase__ : str = -float(np.inf ) UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : int = scores[:, None] + logits seq_lengths[~is_stopped] += 1 UpperCAmelCase__ : Optional[int] = scores_sum / seq_lengths[:, None] UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = scores_sum_average.view(-1 ).topk(UpperCamelCase_ , -1 ) UpperCAmelCase__ : List[Any] = next_tokens // scores_sum.shape[1] UpperCAmelCase__ : str = seq_lengths[next_tokens_source] UpperCAmelCase__ : str = next_tokens % scores_sum.shape[1] UpperCAmelCase__ : Optional[Any] = next_tokens.unsqueeze(1 ) UpperCAmelCase__ : List[str] = tokens[next_tokens_source] UpperCAmelCase__ : List[str] = torch.cat((tokens, next_tokens) , dim=1 ) UpperCAmelCase__ : Any = generated[next_tokens_source] UpperCAmelCase__ : Tuple = scores_sum_average * seq_lengths UpperCAmelCase__ : Tuple = is_stopped[next_tokens_source] UpperCAmelCase__ : List[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) UpperCAmelCase__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 ) UpperCAmelCase__ : List[Any] = is_stopped + next_tokens.eq(UpperCamelCase_ ).squeeze() if is_stopped.all(): break UpperCAmelCase__ : Dict = scores / seq_lengths UpperCAmelCase__ : Optional[Any] = scores.argsort(descending=UpperCamelCase_ ) # tokens tensors are already padded to max_seq_length UpperCAmelCase__ : Dict = [tokens[i] for i in order] UpperCAmelCase__ : 
Optional[Any] = torch.stack(UpperCamelCase_ , dim=0 ) UpperCAmelCase__ : List[str] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
254
0
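A small sketch instantiating the RoFormer configuration above, assuming the class is `transformers.RoFormerConfig` (its name is obfuscated in the listing). It shows how `embedding_size` falls back to `hidden_size` when left unset; the tiny dimensions are illustrative, not a released checkpoint.

from transformers import RoFormerConfig

config = RoFormerConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
assert config.embedding_size == 256  # embedding_size defaulted to hidden_size

config_small_emb = RoFormerConfig(
    embedding_size=128, hidden_size=256, num_hidden_layers=4, num_attention_heads=4
)
assert config_small_emb.embedding_size == 128  # decoupled from hidden_size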
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
454
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) __UpperCamelCase : Any = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "t5" lowercase__ = ["past_key_values"] lowercase__ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Tuple ,lowercase_ : Tuple=3_2_1_2_8 ,lowercase_ : Union[str, Any]=5_1_2 ,lowercase_ : Dict=6_4 ,lowercase_ : Any=2_0_4_8 ,lowercase_ : Dict=6 ,lowercase_ : Dict=None ,lowercase_ : Dict=8 ,lowercase_ : Optional[int]=3_2 ,lowercase_ : List[Any]=1_2_8 ,lowercase_ : Any=0.1 ,lowercase_ : Optional[Any]=1E-6 ,lowercase_ : Any=1.0 ,lowercase_ : Dict="relu" ,lowercase_ : Tuple=True ,lowercase_ : List[Any]=True ,lowercase_ : Tuple=0 ,lowercase_ : Optional[Any]=1 ,**lowercase_ : Any ,): lowerCAmelCase__ : Tuple = vocab_size lowerCAmelCase__ : List[str] = d_model lowerCAmelCase__ : Union[str, Any] = d_kv lowerCAmelCase__ : List[Any] = d_ff lowerCAmelCase__ : int = num_layers lowerCAmelCase__ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase__ : Any = num_heads lowerCAmelCase__ : Optional[int] = relative_attention_num_buckets lowerCAmelCase__ : Union[str, Any] = relative_attention_max_distance lowerCAmelCase__ : List[Any] = dropout_rate lowerCAmelCase__ : str = layer_norm_epsilon lowerCAmelCase__ : int = initializer_factor lowerCAmelCase__ : Dict = feed_forward_proj lowerCAmelCase__ : int = use_cache lowerCAmelCase__ : int = self.feed_forward_proj.split('''-''' ) lowerCAmelCase__ : str = act_info[-1] lowerCAmelCase__ : Tuple = act_info[0] == '''gated''' if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase__ : Tuple = '''gelu_new''' super().__init__( pad_token_id=lowercase_ ,eos_token_id=lowercase_ ,is_encoder_decoder=lowercase_ ,**lowercase_ ,) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" @property def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : int = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: lowerCAmelCase__ : Union[str, Any] = '''past_encoder_sequence + sequence''' lowerCAmelCase__ : List[Any] = {0: '''batch'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Any = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase_ ,direction='''inputs''' ) return common_inputs @property def __lowerCAmelCase ( self : Optional[int] ): return 1_3
450
0
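The T5 configuration above splits `feed_forward_proj` on "-" to derive the dense activation. A short illustration, assuming the class is `transformers.T5Config` and that the erased attribute names are `dense_act_fn` and `is_gated_act` as in the upstream implementation:

from transformers import T5Config

cfg = T5Config(feed_forward_proj="gated-gelu")
# "gated-gelu" is rewritten to the new-style GELU for backwards compatibility
assert cfg.dense_act_fn == "gelu_new"
assert cfg.is_gated_act is True

cfg = T5Config(feed_forward_proj="relu")
assert cfg.dense_act_fn == "relu"
assert cfg.is_gated_act is False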
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCAmelCase__ =logging.get_logger(__name__) UpperCAmelCase__ ={ "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCamelCase__ ( _a ): a : Dict = """gptj""" a : Any = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : List[Any] , A_ : Tuple=5_0_4_0_0 , A_ : str=2_0_4_8 , A_ : str=4_0_9_6 , A_ : Dict=2_8 , A_ : Union[str, Any]=1_6 , A_ : List[str]=6_4 , A_ : Optional[int]=None , A_ : Optional[Any]="gelu_new" , A_ : Optional[int]=0.0 , A_ : str=0.0 , A_ : str=0.0 , A_ : Tuple=1e-5 , A_ : List[str]=0.02 , A_ : Any=True , A_ : int=5_0_2_5_6 , A_ : Optional[Any]=5_0_2_5_6 , A_ : Optional[int]=False , **A_ : List[Any] , ): '''simple docstring''' __lowercase = vocab_size __lowercase = n_positions __lowercase = n_embd __lowercase = n_layer __lowercase = n_head __lowercase = n_inner __lowercase = rotary_dim __lowercase = activation_function __lowercase = resid_pdrop __lowercase = embd_pdrop __lowercase = attn_pdrop __lowercase = layer_norm_epsilon __lowercase = initializer_range __lowercase = use_cache __lowercase = bos_token_id __lowercase = eos_token_id super().__init__( bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ ) class lowerCamelCase__ ( _a ): def __init__( self : Union[str, Any] , A_ : PretrainedConfig , A_ : str = "default" , A_ : List[PatchingSpec] = None , A_ : bool = False , ): '''simple docstring''' super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ ) if not getattr(self._config , """pad_token_id""" , A_ ): # TODO: how to do that better? 
__lowercase = 0 @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' __lowercase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(A_ , direction="""inputs""" ) __lowercase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowercase = {0: """batch""", 1: """sequence"""} return common_inputs @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._config.n_layer @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._config.n_head def SCREAMING_SNAKE_CASE_ ( self : int , A_ : PreTrainedTokenizer , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional[TensorType] = None , ): '''simple docstring''' __lowercase = super(A_ , self ).generate_dummy_inputs( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) # We need to order the input in the way they appears in the forward() __lowercase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowercase , __lowercase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowercase = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers ) ] __lowercase = common_inputs["""attention_mask"""] if self.use_past: __lowercase = ordered_inputs["""attention_mask"""].dtype __lowercase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) return ordered_inputs @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return 1_3
703
"""simple docstring""" import os import numpy import onnx def lowerCAmelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ): """simple docstring""" __lowercase = a.name __lowercase = b.name __lowercase = """""" __lowercase = """""" __lowercase = a == b __lowercase = name_a __lowercase = name_b return res def lowerCAmelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict ): """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ): """simple docstring""" for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Any ): """simple docstring""" __lowercase = list(model.graph.initializer ) __lowercase = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __lowercase = inits[i].name __lowercase = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] ): """simple docstring""" __lowercase = os.path.dirname(UpperCamelCase__ ) __lowercase = os.path.basename(UpperCamelCase__ ) __lowercase = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) __lowercase = list(model.graph.initializer ) __lowercase = set() __lowercase = {} __lowercase = [] __lowercase = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) __lowercase = inits[j].data_type __lowercase = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , UpperCamelCase__ ) total_reduced_size += mem_size __lowercase = inits[i].name __lowercase = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: __lowercase = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) __lowercase = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __lowercase = """optimized_""" + model_file_name __lowercase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
442
0
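The deduplication pass above writes an "optimized_"-prefixed copy of the ONNX file next to the original and returns its path. A hedged usage sketch, assuming the entry point carries its upstream name `remove_dup_initializers` (obfuscated in the listing) and that "model.onnx" is an illustrative file name:

import onnx

model = onnx.load("model.onnx")
print("initializers before:", len(model.graph.initializer))

# duplicate initializers are removed and references rewired to a single copy
new_path = remove_dup_initializers("model.onnx")  # assumed name, see note above
optimized = onnx.load(new_path)  # e.g. optimized_model.onnx
print("initializers after:", len(optimized.graph.initializer))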
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A_ : List[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) A_ : Dict = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 
'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: UpperCamelCase_: str = state_dict.pop(UpperCAmelCase__ ) UpperCamelCase_: Union[str, Any] = val def snake_case (UpperCAmelCase__ ) -> int: UpperCamelCase_: Optional[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase_: int = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) UpperCamelCase_: str = value else: UpperCamelCase_: Optional[int] = value return new_state_dict def snake_case (UpperCAmelCase__ , UpperCAmelCase__=False ) -> Union[str, Any]: UpperCamelCase_: List[Any] = '' if is_panoptic: UpperCamelCase_: Union[str, Any] = 'conditional_detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase_: Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCamelCase_: Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase_: str = in_proj_weight[:2_5_6, :] UpperCamelCase_: int = in_proj_bias[:2_5_6] UpperCamelCase_: Optional[Any] = in_proj_weight[2_5_6:5_1_2, :] UpperCamelCase_: List[str] = in_proj_bias[2_5_6:5_1_2] UpperCamelCase_: Dict = in_proj_weight[-2_5_6:, :] UpperCamelCase_: List[str] = in_proj_bias[-2_5_6:] def snake_case () -> int: UpperCamelCase_: Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase_: List[Any] = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) return im @torch.no_grad() def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: UpperCamelCase_: List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase_: Dict = 'resnet101' if "dc5" in model_name: UpperCamelCase_: Union[str, Any] = True UpperCamelCase_: Optional[int] = 'panoptic' in model_name if is_panoptic: UpperCamelCase_: Optional[Any] = 2_5_0 else: UpperCamelCase_: Optional[Any] = 9_1 UpperCamelCase_: Any = 'huggingface/label-files' UpperCamelCase_: List[str] = 'coco-detection-id2label.json' UpperCamelCase_: Any = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset' ) , 'r' ) ) UpperCamelCase_: str = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} UpperCamelCase_: List[str] = idalabel UpperCamelCase_: List[Any] = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase_: List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' UpperCamelCase_: List[Any] = ConditionalDetrImageProcessor(format=UpperCAmelCase__ ) # prepare image UpperCamelCase_: Optional[int] = prepare_img() UpperCamelCase_: Union[str, Any] = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ) UpperCamelCase_: Optional[int] = encoding['pixel_values'] logger.info(F'''Converting model {model_name}...''' ) # load original model from torch hub UpperCamelCase_: Tuple = torch.hub.load('DeppMeng/ConditionalDETR' , UpperCAmelCase__ , pretrained=UpperCAmelCase__ ).eval() UpperCamelCase_: Union[str, Any] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase_: List[str] = 'conditional_detr.' + src rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: Union[str, Any] = rename_backbone_keys(UpperCAmelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase__ , is_panoptic=UpperCAmelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase_: Optional[Any] = 'conditional_detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('conditional_detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): UpperCamelCase_: int = state_dict.pop(UpperCAmelCase__ ) UpperCamelCase_: Union[str, Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase_: int = state_dict.pop(UpperCAmelCase__ ) UpperCamelCase_: Dict = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: UpperCamelCase_: List[str] = state_dict.pop(UpperCAmelCase__ ) UpperCamelCase_: Union[str, Any] = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): UpperCamelCase_: Optional[int] = state_dict.pop(UpperCAmelCase__ ) UpperCamelCase_: List[str] = val # finally, create HuggingFace model and load state dict UpperCamelCase_: List[Any] = ConditionalDetrForSegmentation(UpperCAmelCase__ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() model.push_to_hub(repo_id=UpperCAmelCase__ , organization='DepuMeng' , commit_message='Add model' ) # verify our conversion UpperCamelCase_: Union[str, Any] = conditional_detr(UpperCAmelCase__ ) UpperCamelCase_: Optional[int] = model(UpperCAmelCase__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 ) # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) image_processor.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Dict = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) A_ : Dict = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
57
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # note: the misspelled kwarg key "use_memorry_efficient_attention" is kept as-is
        # because it is the key callers actually pass for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
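# A quick usage sketch of the rope_scaling validation above. It assumes the class is
# exposed as `OpenLlamaConfig` (per the `open-llama` model_type) and that the keyword
# names match the restored signature; both are assumptions, not guarantees.
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    OpenLlamaConfig(rope_scaling={"type": "unknown", "factor": 2.0})
except ValueError as err:
    # `rope_scaling`'s type field must be one of ['linear', 'dynamic'], got unknown
    print(err)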
57
1
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=True ): model.train() SCREAMING_SNAKE_CASE_: Any =model(lowercase ) SCREAMING_SNAKE_CASE_: int =F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def __magic_name__ ( lowercase , lowercase=False ): set_seed(42 ) SCREAMING_SNAKE_CASE_: int =RegressionModel() SCREAMING_SNAKE_CASE_: List[str] =deepcopy(lowercase ) SCREAMING_SNAKE_CASE_: List[str] =RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE_: Dict =AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE_: Dict =AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE_: int =LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) SCREAMING_SNAKE_CASE_: Optional[int] =LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __magic_name__ ( lowercase ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =get_training_setup(lowercase ) # Use a single batch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads 
step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) SCREAMING_SNAKE_CASE_: Optional[Any] =ddp_input[torch.randperm(len(lowercase ) )] def __magic_name__ ( lowercase ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =get_training_setup(lowercase ) # Use a single batch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) SCREAMING_SNAKE_CASE_: Optional[Any] =ddp_input[torch.randperm(len(lowercase ) )] def __magic_name__ ( lowercase=False , lowercase=False ): SCREAMING_SNAKE_CASE_: Optional[Any] =Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param 
in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) SCREAMING_SNAKE_CASE_: int =ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def __magic_name__ ( lowercase=False , lowercase=False ): SCREAMING_SNAKE_CASE_: Dict =Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' SCREAMING_SNAKE_CASE_: str =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: List[Any] =Accelerator() SCREAMING_SNAKE_CASE_: Tuple =RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(lowercase , batch_size=16 ) SCREAMING_SNAKE_CASE_: str =RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE_: Optional[Any] =DataLoader(lowercase , batch_size=16 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: Optional[Any] =Accelerator() SCREAMING_SNAKE_CASE_: Any =accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def __magic_name__ ( lowercase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
36
"""simple docstring""" from __future__ import annotations def __magic_name__ ( lowercase , lowercase ): SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()] _UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
36
1
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __lowerCAmelCase = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } __lowerCAmelCase = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } __lowerCAmelCase = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } __lowerCAmelCase = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2, """facebook/dpr-ctx_encoder-multiset-base""": 5_1_2, } __lowerCAmelCase = { """facebook/dpr-question_encoder-single-nq-base""": 5_1_2, """facebook/dpr-question_encoder-multiset-base""": 5_1_2, } __lowerCAmelCase = { """facebook/dpr-reader-single-nq-base""": 5_1_2, """facebook/dpr-reader-multiset-base""": 5_1_2, } __lowerCAmelCase = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } __lowerCAmelCase = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } __lowerCAmelCase = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class UpperCAmelCase__ ( 
lowerCAmelCase__ ): """simple docstring""" __UpperCAmelCase : Dict = VOCAB_FILES_NAMES __UpperCAmelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase : List[Any] = DPRContextEncoderTokenizer class UpperCAmelCase__ ( lowerCAmelCase__ ): """simple docstring""" __UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES __UpperCAmelCase : Dict = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase : int = DPRQuestionEncoderTokenizer __lowerCAmelCase = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) __lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) __lowerCAmelCase = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCAmelCase__ ) class UpperCAmelCase__ : """simple docstring""" def __call__( self : List[str] ,_a : Tuple ,_a : Optional[str] = None ,_a : Optional[str] = None ,_a : Union[bool, str] = False ,_a : Union[bool, str] = False ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[bool] = None ,**_a : Dict ,): '''simple docstring''' if titles is None and texts is None: return super().__call__( _A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,) elif titles is None or texts is None: _a : Tuple = titles if texts is None else texts return super().__call__( _A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,) _a : List[Any] = titles if not isinstance(_A ,_A ) else [titles] _a : Any = texts if not isinstance(_A ,_A ) else [texts] _a : List[Any] = len(_A ) _a : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages assert len(_A ) == len( _A ), F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" _a : Tuple = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['''input_ids'''] _a : Any = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['''input_ids'''] _a : str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_A ,_A ) ] } if return_attention_mask is not False: _a : Union[str, Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) _a : str = attention_mask return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A ) def __lowercase ( self : int ,_a : BatchEncoding ,_a : 
DPRReaderOutput ,_a : int = 16 ,_a : int = 64 ,_a : int = 4 ,): '''simple docstring''' _a : Union[str, Any] = reader_input['''input_ids'''] _a : List[Any] = reader_output[:3] _a : int = len(_A ) _a : str = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ ) _a : List[DPRReaderOutput] = [] for doc_id in sorted_docs: _a : Optional[int] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence _a : Optional[int] = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _a : int = sequence_ids.index(self.pad_token_id ) else: _a : Union[str, Any] = len(_A ) _a : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_A ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __lowercase ( self : List[str] ,_a : List[int] ,_a : List[int] ,_a : int ,_a : int ,): '''simple docstring''' _a : Dict = [] for start_index, start_score in enumerate(_A ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) _a : Optional[Any] = sorted(_A ,key=lambda _a : x[1] ,reverse=_A ) _a : int = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" _a : Dict = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_A ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCAmelCase__ ) class UpperCAmelCase__ ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" __UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES __UpperCAmelCase : Optional[int] = READER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Any = READER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask'''] __UpperCAmelCase : Tuple = DPRReaderTokenizer
229
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
74
0
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
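# Behavior sketch for the open()-encoding regex above: it flags open() calls whose
# argument list mentions none of the allowed mode/encoding tokens, and ignores
# binary-mode or explicit-encoding calls. The sample inputs are illustrative only.
pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert pattern.search(" open('data.txt')") is not None  # flagged: no encoding given
assert pattern.search(" open('data.txt', encoding='utf-8')") is None  # explicit encoding
assert pattern.search(" open('data.bin', 'rb')") is None  # binary mode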
718
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
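# Sanity check for the conversion above (a sketch, using the restored names): for
# "a+b*c", infix_2_prefix reverses the input to "c*b+a" (swapping parentheses),
# converts that to postfix "cb*a+", then reverses the result to get the prefix form.
# The tabular trace from infix_2_postfix is printed along the way.
assert infix_2_prefix("a+b*c") == "+a*bc"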
367
0
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # rightmost index of char in pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost mismatching position in the text window, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
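# Quick check of the heuristic above: "AB" occurs in "ABAABA" at offsets 0 and 3.
# Note that reassigning the loop variable (`i = mismatch_index - match_index`) has no
# effect on a Python for-loop, so every alignment is still examined; the result stays
# correct, the code just does not get the Boyer-Moore skip speedup.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]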
557
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
557
1
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # The fitness score is the number of characters already in the right position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know that the
        # algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
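A quick sketch of the two genetic operators defined above; printed values are illustrative only, since both operators are randomized:

# Usage sketch for crossover/mutate; outputs vary run to run.
print(crossover("AAAAAA", "BBBBBB"))  # e.g. ('AAABBB', 'BBBAAA') when the slice point is 3
print(mutate("AAAAAA", list("AB")))   # unchanged ~60% of the time, else one gene is replaced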
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed, print it.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[].
    if i >= n:
        return
    # Current is included, put next at next location.
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # Current is excluded, replace it with next
    # (note that i+1 is passed, but index is not changed).
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util().
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time.
    data = [0] * r
    # Print all combinations using the temporary array data[].
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
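For the driver input above there are C(5, 3) = 5!/(3!·2!) = 10 combinations, and the include/exclude recursion emits them in lexicographic index order:

# Expected output of print_combination([10, 20, 30, 40, 50], 5, 3):
# 10 20 30
# 10 20 40
# 10 20 50
# 10 30 40
# 10 30 50
# 10 40 50
# 20 30 40
# 20 30 50
# 20 40 50
# 30 40 50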
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowercase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) -> Optional[int]: A : int = parent A : int = batch_size A : Optional[Any] = num_channels A : int = image_size A : Any = patch_size A : Any = is_training A : int = use_input_mask A : Union[str, Any] = use_token_type_ids A : Optional[Any] = use_labels A : str = vocab_size A : Any = hidden_size A : str = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Tuple = intermediate_size A : Optional[Any] = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Tuple = type_sequence_label_size A : Optional[Any] = initializer_range A : Tuple = coordinate_size A : Tuple = shape_size A : List[str] = num_labels A : Optional[Any] = num_choices A : Optional[int] = scope A : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A : Optional[Any] = text_seq_length A : int = (image_size // patch_size) ** 2 + 1 A : Any = self.text_seq_length + self.image_seq_length def snake_case ( self ) -> Dict: A : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) A : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) A : Tuple = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A : Any = bbox[i, j, 3] A : Tuple = bbox[i, j, 1] A : int = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: A : str = bbox[i, j, 2] A : int = bbox[i, j, 0] A : Union[str, Any] = tmp_coordinate A : Tuple = tf.constant(__UpperCAmelCase ) A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Union[str, Any] = 
None if self.use_input_mask: A : Dict = random_attention_mask([self.batch_size, self.text_seq_length] ) A : List[Any] = None if self.use_token_type_ids: A : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) A : Any = None A : str = None if self.use_labels: A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) A : Dict = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: A : int = TFLayoutLMvaModel(config=__UpperCAmelCase ) # text + image A : Dict = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) A : List[Any] = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , training=__UpperCAmelCase , ) A : Any = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only A : Union[str, Any] = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only A : Tuple = model({'''pixel_values''': pixel_values} , training=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: A : str = self.num_labels A : Any = TFLayoutLMvaForSequenceClassification(config=__UpperCAmelCase ) A : str = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: A : Optional[int] = self.num_labels A : int = TFLayoutLMvaForTokenClassification(config=__UpperCAmelCase ) A : Optional[Any] = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def 
snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: A : Tuple = 2 A : List[Any] = TFLayoutLMvaForQuestionAnswering(config=__UpperCAmelCase ) A : str = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ) -> Optional[Any]: A : Tuple = self.prepare_config_and_inputs() ((A) , (A) , (A) , (A) , (A) , (A) , (A) , (A)) : int = config_and_inputs A : Union[str, Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase_ : List[str] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) UpperCAmelCase_ : Dict = False UpperCAmelCase_ : int = False UpperCAmelCase_ : str = False def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: return True def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> dict: A : Dict = copy.deepcopy(__UpperCAmelCase ) if model_class in get_values(__UpperCAmelCase ): A : Any = { k: tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__UpperCAmelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__UpperCAmelCase ): A : Optional[Any] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): A : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) A : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): A : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): A : Optional[int] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def snake_case ( self ) -> Optional[int]: A : str = TFLayoutLMvaModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def snake_case ( self ) -> Any: self.config_tester.run_common_tests() def snake_case ( self ) -> Union[str, Any]: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : int = model_class(__UpperCAmelCase ) if getattr(__UpperCAmelCase , '''hf_compute_loss''' , __UpperCAmelCase ): # The number of elements in the loss should be the same as the number of elements in the label A : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , 
__UpperCAmelCase , return_labels=__UpperCAmelCase ) A : int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCAmelCase )[0] ] A : Optional[int] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs A : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) A : Optional[int] = prepared_for_class.pop('''input_ids''' ) A : Any = model(__UpperCAmelCase , **__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions A : Dict = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) A : Optional[Any] = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: A : Optional[int] = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: A : List[Any] = -1_00 A : Any = tf.convert_to_tensor(__UpperCAmelCase ) A : Any = model(__UpperCAmelCase , **__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict A : Any = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) A : List[str] = model(__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple A : str = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) # Get keys that were added with the _prepare_for_class function A : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys() A : List[str] = inspect.signature(model.call ).parameters A : List[Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple A : Dict = {0: '''input_ids'''} for label_key in label_keys: A : Optional[int] = signature_names.index(__UpperCAmelCase ) A : int = label_key A : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple A : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: A : int = prepared_for_class[value] A : Union[str, Any] = tuple(__UpperCAmelCase ) # Send to model A : Any = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def snake_case ( self ) -> Optional[int]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ) -> List[Any]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A : Dict = type self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ) -> Any: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : Any = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ) -> Any: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ) -> Optional[int]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ) -> Optional[Any]: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Tuple = TFLayoutLMvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def snake_case__ ( ): A : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class __lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self ) -> Tuple: return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None @slow def snake_case ( self ) -> Tuple: A : Dict = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) A : List[str] = self.default_image_processor A : str = prepare_img() A : List[str] = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' ).pixel_values A : Any = tf.constant([[1, 2]] ) A : int = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass A : Tuple = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits A : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase ) A : str = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[int] = StableDiffusionInstructPixaPixPipeline UpperCAmelCase_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ) -> List[Any]: torch.manual_seed(0 ) A : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) A : Any = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) torch.manual_seed(0 ) A : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) A : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) A : Dict = CLIPTextModel(__UpperCAmelCase ) A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) A : int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Dict: A : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) A : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] A : Optional[Any] = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ) if str(__UpperCAmelCase ).startswith('''mps''' ): A : str = torch.manual_seed(__UpperCAmelCase ) else: A : Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) A : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def snake_case ( self ) -> Union[str, Any]: A : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : Dict = 
self.get_dummy_components() A : str = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase ) A : Any = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) A : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) A : int = sd_pipe(**__UpperCAmelCase ).images A : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A : Optional[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ) -> int: A : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : Optional[Any] = self.get_dummy_components() A : Any = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase ) A : str = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) A : str = self.get_dummy_inputs(__UpperCAmelCase ) A : Optional[int] = '''french fries''' A : Any = sd_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) A : Optional[int] = output.images A : str = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ) -> Tuple: A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : List[Any] = self.get_dummy_components() A : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase ) A : Tuple = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) A : str = self.get_dummy_inputs(__UpperCAmelCase ) A : int = [inputs['''prompt''']] * 2 A : List[str] = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0 A : Any = torch.from_numpy(__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase ) A : Union[str, Any] = image / 2 + 0.5 A : str = image.permute(0 , 3 , 1 , 2 ) A : List[str] = image.repeat(2 , 1 , 1 , 1 ) A : Union[str, Any] = sd_pipe(**__UpperCAmelCase ).images A : int = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) A : Any = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ) -> Dict: A : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : List[Any] = self.get_dummy_components() A : Union[str, Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' ) A : int = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase ) A : Tuple = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) A : Tuple = self.get_dummy_inputs(__UpperCAmelCase ) A : Optional[Any] = sd_pipe(**__UpperCAmelCase ).images A : Dict = image[0, -3:, -3:, -1] A : Optional[int] = [round(__UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(__UpperCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) A : Dict = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def snake_case ( self ) -> Union[str, Any]: A : 
int = self.get_dummy_components() A : int = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase ) A : Any = VaeImageProcessor(do_resize=__UpperCAmelCase , do_normalize=__UpperCAmelCase ) A : Optional[Any] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) A : Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCAmelCase , input_image_type='''pt''' ) )[0] A : Optional[Any] = components['''vae'''] A : Dict = self.get_dummy_inputs_by_type(__UpperCAmelCase , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): A : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode() A : List[str] = pipe(**__UpperCAmelCase )[0] A : Any = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCAmelCase , 1E-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase=0 ) -> Tuple: A : List[str] = torch.manual_seed(__UpperCAmelCase ) A : List[str] = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) A : Dict = { '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def snake_case ( self ) -> Optional[int]: A : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() A : Optional[int] = self.get_inputs() A : Any = pipe(**__UpperCAmelCase ).images A : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def snake_case ( self ) -> Dict: A : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase ) A : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() A : Union[str, Any] = self.get_inputs() A : Union[str, Any] = pipe(**__UpperCAmelCase ).images A : str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Dict = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def snake_case ( self ) -> List[Any]: A : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase ) A : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() A : str = self.get_inputs() A : List[Any] = pipe(**__UpperCAmelCase ).images A : str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Any = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 
0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def snake_case ( self ) -> Optional[Any]: A : Optional[Any] = 0 def callback_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None: A : List[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A : Optional[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A : Union[str, Any] = latents[0, -3:, -3:, -1] A : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: A : int = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A : Optional[int] = latents[0, -3:, -3:, -1] A : str = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 A : Optional[Any] = False A : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa ) A : List[str] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() A : List[Any] = self.get_inputs() pipe(**__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def snake_case ( self ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa ) A : Dict = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A : Optional[Any] = self.get_inputs() A : str = pipe(**__UpperCAmelCase ) A : Dict = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def snake_case ( self ) -> int: A : Union[str, Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 A : Union[str, Any] = inputs['''image'''].resize((5_04, 5_04) ) A : str = '''timbrooks/instruct-pix2pix''' A : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() A : Tuple = pipe(**__UpperCAmelCase ) A : List[str] = output.images[0] A : List[Any] = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 5_04, 3) A : Any = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
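Outside the test harness, the pipeline exercised above can be driven in a few lines. A minimal inference sketch; the image path, prompt, and step count below are illustrative placeholders:

# Hedged sketch of the InstructPix2Pix pipeline these tests cover.
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe = pipe.to("cuda")
image = Image.open("input.jpg").convert("RGB")  # placeholder image
edited = pipe("turn him into a cyborg", image=image, num_inference_steps=20).images[0]
edited.save("output.jpg")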
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
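A short usage sketch tying the sparse-step arithmetic above to concrete numbers:

# Sketch: with the defaults, 12 encoder layers and 3 sparse layers give a
# sparse layer every 12 // 3 = 4 layers.
from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(num_experts=8, expert_capacity=64)
print(config.encoder_sparse_step)  # 4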
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Finds the rightmost index of char in the pattern, or -1 if absent.
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Finds the index in the text of the last mismatched character when the
        # pattern is aligned at current_pos, or -1 if the window matches fully.
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
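A minimal sketch of wiring this config to a timm model; the backbone name and index choices below are illustrative:

# Sketch: configure a timm-backed backbone; "resnet18" is an illustrative
# timm model name, and out_indices selects which feature stages to expose.
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
print(config.use_timm_backbone, config.out_indices)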
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , ): output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , ) else: export( lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ): UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): UpperCAmelCase_ = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: UpperCAmelCase_ = "cpu" UpperCAmelCase_ = Path(lowerCAmelCase__ ) # VAE DECODER UpperCAmelCase_ = AutoencoderKL.from_pretrained(model_path + "/vae" ) UpperCAmelCase_ = vae_decoder.config.latent_channels # forward only through the decoder part UpperCAmelCase_ = vae_decoder.decode onnx_export( lowerCAmelCase__ , model_args=( torch.randn(1 , lowerCAmelCase__ , 25 , 25 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=lowerCAmelCase__ , ) del vae_decoder if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=14, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") lowerCamelCase = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("""SD: Done: ONNX""")
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase = logging.get_logger(__name__) class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Optional[int] , ) -> None: '''simple docstring''' super().__init__(**_UpperCAmelCase ) UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384} UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size # Default value set here for backwards compatibility where the value in config is None UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256 UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray: '''simple docstring''' UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""" ) UpperCAmelCase_ = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCAmelCase_ = int(shortest_edge / crop_pct ) UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> Any: '''simple docstring''' return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray: '''simple docstring''' return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image: '''simple docstring''' UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." 
) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] UpperCAmelCase_ = {"pixel_values": images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
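The class above mirrors the ConvNeXT-style image processor in transformers; a minimal usage sketch, where the transformers class name is an assumption based on that resemblance and the input image is a dummy:

# Hedged sketch: resize/crop_pct behavior at shortest_edge >= 384 warps
# directly to a square, so a 512x640 dummy image becomes (1, 3, 384, 384).
import numpy as np
from PIL import Image
from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 384})
dummy = Image.fromarray(np.zeros((512, 640, 3), dtype=np.uint8))
batch = processor(images=dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)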
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", 
"FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A 
: Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
32
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
377
0
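# Illustrative sketch (not a dataset row): the OrderedDicts above feed _LazyAutoMapping so the
# FlaxAuto* classes can resolve a config type to a concrete Flax model class. Minimal usage,
# assuming `transformers` plus its flax extras are installed; the checkpoint is just an example.
from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")  # dispatches to FlaxBertModel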
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowerCamelCase_ ( unittest.TestCase ): def __magic_name__ ( self ): a_ = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) a_ = Vector() def __magic_name__ ( self ): a_ = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , """(0,0,0,0,0,1)""" ) def __magic_name__ ( self ): a_ = Vector([1, 2, 3, 4] ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 4 ) def __magic_name__ ( self ): a_ = Vector([1, 2] ) a_ = Vector([1, 2, 3, 4, 5] ) a_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) a_ = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 ) def __magic_name__ ( self ): a_ = Vector([1, 2, 3] ) a_ = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __magic_name__ ( self ): a_ = Vector([1, 2, 3] ) a_ = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __magic_name__ ( self ): a_ = Vector([1, 2, 3] ) a_ = Vector([2, -1, 4] ) # for test of dot product a_ = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def __magic_name__ ( self ): self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def __magic_name__ ( self ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def __magic_name__ ( self ): a_ = Vector([1, 2, 3] ) a_ = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , """(3,4,7)""" ) def __magic_name__ ( self ): a_ = Vector([1, 0, 0, 0, 0, 0] ) a_ = x.copy() self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , str(_SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( self ): a_ = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , """(0,1,0)""" ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(_SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a_ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a_ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) a_ = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(_SCREAMING_SNAKE_CASE ) ) def 
__magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def __magic_name__ ( self ): a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def __magic_name__ ( self ): self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
403
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging _A = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( ) -> Tuple: """simple docstring""" a_ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. a_ = json.loads(UpperCamelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. a_ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". a_ = json.loads(UpperCamelCase ) if not mpi_options.get("""sagemaker_mpi_enabled""" , UpperCamelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("""smdistributed""" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): _lowerCamelCase : str = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def __magic_name__ ( self ): super().__post_init__() warnings.warn( """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """ """`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , ) @cached_property def __magic_name__ ( self ): logger.info("""PyTorch: setting up devices""" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( """torch.distributed process group is initialized, but local_rank == -1. """ """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" ) if self.no_cuda: a_ = torch.device("""cpu""" ) a_ = 0 elif is_sagemaker_model_parallel_available(): a_ = smp.local_rank() a_ = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE ) a_ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta ) a_ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) ) a_ = torch.device("""cuda""" , self.local_rank ) a_ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 a_ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. a_ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta ) a_ = torch.device("""cuda""" , self.local_rank ) a_ = 1 if device.type == "cuda": torch.cuda.set_device(_SCREAMING_SNAKE_CASE ) return device @property def __magic_name__ ( self ): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __magic_name__ ( self ): return not is_sagemaker_model_parallel_available() @property def __magic_name__ ( self ): return False
403
1
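# Illustrative sketch (not a dataset row): the minor/cofactor/determinant values asserted in the
# test row above follow from Laplace expansion. A self-contained 3x3 re-statement in plain
# Python (no dependency on the `lib` module those tests import):
def minor(mat, i, j):
    # determinant of the 2x2 matrix left after deleting row i and column j
    rows = [r[:j] + r[j + 1:] for k, r in enumerate(mat) if k != i]
    return rows[0][0] * rows[1][1] - rows[0][1] * rows[1][0]

def cofactor(mat, i, j):
    return (-1) ** (i + j) * minor(mat, i, j)

A = [[1, 2, 3], [2, 4, 5], [6, 7, 8]]
det = sum(A[0][j] * cofactor(A, 0, j) for j in range(3))  # expand along the first row
assert det == -5  # the same determinant the test suite asserts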
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE :Optional[int] = """""" SCREAMING_SNAKE_CASE :List[str] = """""" SCREAMING_SNAKE_CASE :Optional[int] = """""" SCREAMING_SNAKE_CASE :Dict = 1 # (0 is vertical, 1 is horizontal) def lowerCAmelCase( )-> None: """simple docstring""" UpperCamelCase_ , UpperCamelCase_ = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print("Processing..." ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for index, image in enumerate(SCREAMING_SNAKE_CASE_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase_ = random_chars(3_2 ) UpperCamelCase_ = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] UpperCamelCase_ = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(f"{file_root}.jpg" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(f"Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}" ) UpperCamelCase_ = [] for anno in new_annos[index]: UpperCamelCase_ = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(SCREAMING_SNAKE_CASE_ ) with open(f"{file_root}.txt" , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> tuple[list, list]: """simple docstring""" UpperCamelCase_ = [] UpperCamelCase_ = [] for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , "*.txt" ) ): UpperCamelCase_ = label_file.split(os.sep )[-1].rsplit("." , 1 )[0] with open(SCREAMING_SNAKE_CASE_ ) as in_file: UpperCamelCase_ = in_file.readlines() UpperCamelCase_ = os.path.join(SCREAMING_SNAKE_CASE_ , f"{label_name}.jpg" ) UpperCamelCase_ = [] for obj_list in obj_lists: UpperCamelCase_ = obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(SCREAMING_SNAKE_CASE_ ) labels.append(SCREAMING_SNAKE_CASE_ ) return img_paths, labels def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 )-> tuple[list, list, list]: """simple docstring""" UpperCamelCase_ = [] UpperCamelCase_ = [] UpperCamelCase_ = [] for idx in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase_ = [] UpperCamelCase_ = img_list[idx] path_list.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = anno_list[idx] UpperCamelCase_ = cva.imread(SCREAMING_SNAKE_CASE_ ) if flip_type == 1: UpperCamelCase_ = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase_ = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: UpperCamelCase_ = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase_ = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(SCREAMING_SNAKE_CASE_ ) new_imgs_list.append(SCREAMING_SNAKE_CASE_ ) return new_imgs_list, new_annos_lists, path_list def lowerCAmelCase( SCREAMING_SNAKE_CASE_ = 3_2 )-> str: """simple docstring""" assert number_char > 1, "The number of characters should be greater than 1" UpperCamelCase_ = ascii_lowercase + digits return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
628
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) SCREAMING_SNAKE_CASE :Union[str, Any] = { """configuration_speecht5""": [ """SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""", """SpeechT5Config""", """SpeechT5HifiGanConfig""", ], """feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""], """processing_speecht5""": ["""SpeechT5Processor"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Union[str, Any] = ["""SpeechT5Tokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Any = [ """SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """SpeechT5ForSpeechToText""", """SpeechT5ForSpeechToSpeech""", """SpeechT5ForTextToSpeech""", """SpeechT5Model""", """SpeechT5PreTrainedModel""", """SpeechT5HifiGan""", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
628
1
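# Illustrative sketch (not a dataset row): the annotation update above relies on the fact that
# for YOLO-style boxes (class, x_center, y_center, w, h; coordinates normalized to [0, 1]) a
# horizontal flip maps x_center -> 1 - x_center and a vertical flip maps y_center -> 1 - y_center.
def flip_bbox(bbox, flip_type=1):
    cls, x, y, w, h = bbox
    return [cls, 1 - x, y, w, h] if flip_type == 1 else [cls, x, 1 - y, w, h]

assert flip_bbox([0, 0.25, 0.75, 0.1, 0.1], flip_type=1) == [0, 0.75, 0.75, 0.1, 0.1]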
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class A_ ( unittest.TestCase ): def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=18 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : List[Any]=4_00 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Tuple=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[Any]=[0.5, 0.5, 0.5] , ): __a = size if size is not None else {"height": 18, "width": 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_normalize __a = image_mean __a = image_std def _UpperCAmelCase ( self : int ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class A_ ( a_ , unittest.TestCase ): _SCREAMING_SNAKE_CASE = DPTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self : Dict ): __a = DPTImageProcessingTester(self ) @property def _UpperCAmelCase ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self : Any ): __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "image_mean" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "image_std" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_normalize" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_resize" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "size" ) ) def _UpperCAmelCase ( self : Dict ): __a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) __a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCAmelCase ( self : str ): # Initialize image_processing __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCAmelCase ( self : List[Any] ): # Initialize image_processing __a = self.image_processing_class(**self.image_processor_dict ) # 
create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCAmelCase ( self : List[Any] ): # Initialize image_processing __a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
525
from ... import PretrainedConfig SCREAMING_SNAKE_CASE : Any = { """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""", } class A_ ( a_ ): _SCREAMING_SNAKE_CASE = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP _SCREAMING_SNAKE_CASE = """nezha""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[str]=2_11_28 , __SCREAMING_SNAKE_CASE : Dict=7_68 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=30_72 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : int=5_12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=64 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-12 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=True , **__SCREAMING_SNAKE_CASE : List[str] , ): super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = max_relative_position __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout __a = use_cache
525
1
from __future__ import annotations def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> tuple[int, int]: if b == 0: return (1, 0) ((__lowercase) , (__lowercase)) : Any = extended_euclid(__lowerCAmelCase , a % b ) __lowercase : Dict = a // b return (y, x - k * y) def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: ((__lowercase) , (__lowercase)) : List[str] = extended_euclid(__lowerCAmelCase , __lowerCAmelCase ) __lowercase : List[str] = na * na __lowercase : Union[str, Any] = ra * x * na + ra * y * na return (n % m + m) % m def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int: ((__lowercase) , (__lowercase)) : Optional[int] = extended_euclid(__lowerCAmelCase , __lowerCAmelCase ) if b < 0: __lowercase : Any = (b % n + n) % n return b def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: __lowercase , __lowercase : Optional[int] = invert_modulo(__lowerCAmelCase , __lowerCAmelCase ), invert_modulo(__lowerCAmelCase , __lowerCAmelCase ) __lowercase : Any = na * na __lowercase : List[str] = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name="chinese_remainder_theorem", verbose=True) testmod(name="chinese_remainder_theorem2", verbose=True) testmod(name="invert_modulo", verbose=True) testmod(name="extended_euclid", verbose=True)
509
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __lowerCAmelCase : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __lowerCAmelCase : List[Any] = json.load(f) @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self : List[str] , _snake_case : List[Any] ): return FSMTTokenizer.from_pretrained(_snake_case ) def snake_case_ ( self : Any , _snake_case : List[str] ): __lowercase : str = FSMTForConditionalGeneration.from_pretrained(_snake_case ).to(_snake_case ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def snake_case_ ( self : Tuple , _snake_case : int , _snake_case : Union[str, Any] ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality __lowercase : Tuple = F'facebook/wmt19-{pair}' __lowercase : Tuple = self.get_tokenizer(_snake_case ) __lowercase : Dict = self.get_model(_snake_case ) __lowercase : Dict = bleu_data[pair]['''src'''] __lowercase : Any = bleu_data[pair]['''tgt'''] __lowercase : Any = tokenizer(_snake_case , return_tensors='''pt''' , truncation=_snake_case , padding='''longest''' ).to(_snake_case ) __lowercase : Optional[int] = model.generate( input_ids=batch.input_ids , num_beams=8 , ) __lowercase : Any = tokenizer.batch_decode( _snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) __lowercase : Tuple = calculate_bleu(_snake_case , _snake_case ) print(_snake_case ) self.assertGreaterEqual(scores['''bleu'''] , _snake_case )
509
1
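# Illustrative sketch (not a dataset row): a readable-name re-statement of the extended-Euclid /
# Chinese-remainder helpers in the code row above (the row's identifiers are mangled), plus a
# check that the result satisfies both congruences.
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # returns (x, y) with a * x + b * y == gcd(a, b)
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    return (y, x - (a // b) * y)

def chinese_remainder_theorem(na: int, ra: int, nb: int, rb: int) -> int:
    # smallest non-negative n with n % na == ra and n % nb == rb (na, nb coprime)
    (x, y) = extended_euclid(na, nb)
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m

assert chinese_remainder_theorem(5, 1, 7, 3) == 31  # 31 % 5 == 1 and 31 % 7 == 3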
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): lowercase__ : Any = BarthezTokenizer lowercase__ : int = BarthezTokenizerFast lowercase__ : int = True lowercase__ : List[str] = True def lowercase_ ( self ): '''simple docstring''' super().setUp() A__ = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCamelCase__ ) A__ = tokenizer def lowercase_ ( self ): '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(UpperCamelCase__ ) , 10_11_22 ) def lowercase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def lowercase_ ( self ): '''simple docstring''' A__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] A__ = [0, 57, 30_18, 7_03_07, 91, 2] A__ = self.tokenizer( UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) A__ = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = "I was born in 92000, and this is falsé." 
A__ = tokenizer.tokenize(UpperCamelCase__ ) A__ = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) A__ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) A__ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(UpperCamelCase__ ) A__ = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def lowercase_ ( self ): '''simple docstring''' A__ = {"input_ids": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. A__ = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=UpperCamelCase__ , )
261
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def lowercase_ ( self ): '''simple docstring''' A__ = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" ) A__ = { "input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute" "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } A__ = model(UpperCamelCase__ )["last_hidden_state"] A__ = tf.TensorShape((1, 6, 7_68) ) self.assertEqual(output.shape , UpperCamelCase__ ) # compare the actual values for a slice. A__ = tf.convert_to_tensor( [ [ [0.068_1762, 0.1089_4451, 0.0677_2504], [-0.0642_3668, 0.0236_6615, 0.0432_9344], [-0.0605_7295, 0.0997_4135, -0.0007_0584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
261
1
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def snake_case ( A__ ,A__ ,A__ ): if isinstance(A__ ,torch.Tensor ): return image elif isinstance(A__ ,PIL.Image.Image ): UpperCAmelCase_ : int = [image] if isinstance(image[0] ,PIL.Image.Image ): UpperCAmelCase_ : List[Any] = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] UpperCAmelCase_ : Dict = np.concatenate(A__ ,axis=0 ) UpperCAmelCase_ : List[Any] = np.array(A__ ).astype(np.floataa ) / 255.0 UpperCAmelCase_ : List[str] = image.transpose(0 ,3 ,1 ,2 ) UpperCAmelCase_ : Any = 2.0 * image - 1.0 UpperCAmelCase_ : Optional[Any] = torch.from_numpy(A__ ) elif isinstance(image[0] ,torch.Tensor ): UpperCAmelCase_ : Tuple = torch.cat(A__ ,dim=0 ) return image def snake_case ( A__ ,A__ ,A__ ,A__=0.9995 ): if not isinstance(A__ ,np.ndarray ): UpperCAmelCase_ : Any = True UpperCAmelCase_ : Optional[Any] = va.device UpperCAmelCase_ : Any = va.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = va.cpu().numpy() UpperCAmelCase_ : List[str] = np.sum(va * va / (np.linalg.norm(A__ ) * np.linalg.norm(A__ )) ) if np.abs(A__ ) > DOT_THRESHOLD: UpperCAmelCase_ : str = (1 - t) * va + t * va else: UpperCAmelCase_ : Union[str, Any] = np.arccos(A__ ) UpperCAmelCase_ : Optional[Any] = np.sin(A__ ) UpperCAmelCase_ : Optional[int] = theta_a * t UpperCAmelCase_ : Any = np.sin(A__ ) UpperCAmelCase_ : int = np.sin(theta_a - theta_t ) / sin_theta_a UpperCAmelCase_ : int = sin_theta_t / sin_theta_a UpperCAmelCase_ : int = sa * va + sa * va if inputs_are_torch: UpperCAmelCase_ : List[Any] = torch.from_numpy(A__ ).to(A__ ) return va def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Dict = F.normalize(A__ ,dim=-1 ) UpperCAmelCase_ : str = F.normalize(A__ ,dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def snake_case ( A__ ,A__ ): for param in model.parameters(): UpperCAmelCase_ : List[str] = value class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowerCAmelCase_ : CLIPFeatureExtractor , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Tuple=None , ) -> Any: super().__init__() self.register_modules( vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , clip_model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , coca_model=lowerCAmelCase_ , coca_tokenizer=lowerCAmelCase_ , coca_transform=lowerCAmelCase_ , ) UpperCAmelCase_ : str = ( feature_extractor.size if isinstance(feature_extractor.size , lowerCAmelCase_ ) else feature_extractor.size["shortest_edge"] ) UpperCAmelCase_ : str = transforms.Normalize(mean=feature_extractor.image_mean , 
std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , lowerCAmelCase_ ) set_requires_grad(self.clip_model , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: self.enable_attention_slicing(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: set_requires_grad(self.vae , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: set_requires_grad(self.vae , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: set_requires_grad(self.unet , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: set_requires_grad(self.unet , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ) -> List[str]: # get the original timestep using init_timestep UpperCAmelCase_ : Optional[Any] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase_ : Union[str, Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]=None ) -> Tuple: if not isinstance(lowerCAmelCase_ , torch.Tensor ): raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase_ )}""" ) UpperCAmelCase_ : Optional[Any] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ ) ] UpperCAmelCase_ : str = torch.cat(lowerCAmelCase_ , dim=0 ) else: UpperCAmelCase_ : Optional[int] = self.vae.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCAmelCase_ : Any = 0.1_8_2_1_5 * init_latents UpperCAmelCase_ : List[str] = init_latents.repeat_interleave(lowerCAmelCase_ , dim=0 ) UpperCAmelCase_ : Optional[Any] = randn_tensor(init_latents.shape , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) # get latents UpperCAmelCase_ : Dict = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = init_latents return latents def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] ) -> int: UpperCAmelCase_ : Any = self.coca_transform(lowerCAmelCase_ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): UpperCAmelCase_ : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) UpperCAmelCase_ : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> 
List[Any]: UpperCAmelCase_ : Optional[Any] = self.feature_extractor.preprocess(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() UpperCAmelCase_ : List[Any] = self.clip_model.get_image_features(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ ) UpperCAmelCase_ : str = image_embeddings_clip.repeat_interleave(lowerCAmelCase_ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , ) -> List[str]: UpperCAmelCase_ : str = latents.detach().requires_grad_() UpperCAmelCase_ : List[Any] = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # predict the noise residual UpperCAmelCase_ : int = self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): UpperCAmelCase_ : str = self.scheduler.alphas_cumprod[timestep] UpperCAmelCase_ : int = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ : List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 UpperCAmelCase_ : Union[str, Any] = torch.sqrt(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , lowerCAmelCase_ ): UpperCAmelCase_ : Any = self.scheduler.sigmas[index] UpperCAmelCase_ : Any = latents - sigma * noise_pred else: raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCAmelCase_ : List[Any] = 1 / 0.1_8_2_1_5 * sample UpperCAmelCase_ : Dict = self.vae.decode(lowerCAmelCase_ ).sample UpperCAmelCase_ : str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : Any = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase_ ) UpperCAmelCase_ : Any = self.normalize(lowerCAmelCase_ ).to(latents.dtype ) UpperCAmelCase_ : Union[str, Any] = self.clip_model.get_image_features(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = spherical_dist_loss(lowerCAmelCase_ , lowerCAmelCase_ ).mean() * clip_guidance_scale UpperCAmelCase_ : str = -torch.autograd.grad(lowerCAmelCase_ , lowerCAmelCase_ )[0] if isinstance(self.scheduler , lowerCAmelCase_ ): UpperCAmelCase_ : str = latents.detach() + grads * (sigma**2) UpperCAmelCase_ : Optional[Any] = noise_pred_original else: UpperCAmelCase_ : Optional[int] = noise_pred_original - torch.sqrt(lowerCAmelCase_ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Union[str, Any] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[int] = 512 , lowerCAmelCase_ : Optional[int] = 512 , lowerCAmelCase_ : float = 0.6 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[float] = 7.5 , lowerCAmelCase_ 
: Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[float] = 100 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : float = 0.8 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , ) -> int: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size: raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(lowerCAmelCase_ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(lowerCAmelCase_ , torch.Generator ) and batch_size > 1: UpperCAmelCase_ : Optional[Any] = [generator] + [None] * (batch_size - 1) UpperCAmelCase_ : Optional[int] = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] UpperCAmelCase_ : Dict = [x[0] for x in coca_is_none if x[1]] UpperCAmelCase_ : str = ", ".join(lowerCAmelCase_ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(lowerCAmelCase_ ): raise ValueError( f"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) UpperCAmelCase_ : List[str] = self.get_image_description(lowerCAmelCase_ ) if style_prompt is None: if len(lowerCAmelCase_ ): raise ValueError( f"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) UpperCAmelCase_ : List[str] = self.get_image_description(lowerCAmelCase_ ) # get prompt text embeddings for content and style UpperCAmelCase_ : str = self.tokenizer( lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="pt" , ) UpperCAmelCase_ : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] UpperCAmelCase_ : int = self.tokenizer( lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="pt" , ) UpperCAmelCase_ : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] UpperCAmelCase_ : str = slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # duplicate text embeddings for each generation per prompt UpperCAmelCase_ : Dict = text_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 ) # set timesteps UpperCAmelCase_ : List[str] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) UpperCAmelCase_ : List[str] = {} if accepts_offset: UpperCAmelCase_ : Optional[Any] = 1 self.scheduler.set_timesteps(lowerCAmelCase_ , **lowerCAmelCase_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , self.device ) UpperCAmelCase_ : List[str] = timesteps[:1].repeat(lowerCAmelCase_ ) # Preprocess image UpperCAmelCase_ : Optional[Any] = preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self.prepare_latents( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ ) UpperCAmelCase_ : Any = preprocess(lowerCAmelCase_ , lowerCAmelCase_ , 
lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.prepare_latents( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ ) UpperCAmelCase_ : str = slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if clip_guidance_scale > 0: UpperCAmelCase_ : int = self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = slerp( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCAmelCase_ : List[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase_ : List[str] = content_text_input.input_ids.shape[-1] UpperCAmelCase_ : Tuple = self.tokenizer([""] , padding="max_length" , max_length=lowerCAmelCase_ , return_tensors="pt" ) UpperCAmelCase_ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt UpperCAmelCase_ : Optional[int] = uncond_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. UpperCAmelCase_ : List[Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase_ : Union[str, Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps UpperCAmelCase_ : Tuple = torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_ ).to( self.device ) else: UpperCAmelCase_ : Optional[Any] = torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) UpperCAmelCase_ : Any = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase_ : List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase_ : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase_ : Optional[int] = {} if accepts_eta: UpperCAmelCase_ : Any = eta # check if the scheduler accepts generator UpperCAmelCase_ : Optional[int] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: UpperCAmelCase_ : List[Any] = generator with self.progress_bar(total=lowerCAmelCase_ ): for i, t in enumerate(lowerCAmelCase_ ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : int = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # predict the noise residual UpperCAmelCase_ : List[Any] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample # perform classifier free guidance if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.chunk(2 ) UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: UpperCAmelCase_ : Union[str, Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.cond_fn( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : int = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCAmelCase_ : Tuple = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase_ : Any = self.vae.decode(lowerCAmelCase_ ).sample UpperCAmelCase_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_ )
95
def _lowerCamelCase ( collection ):
    '''simple docstring'''
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    __lowercase = input("""Enter numbers separated by a comma:\n""").strip()
    __lowercase = [int(item) for item in __lowercase.split(""",""")]
    print(_lowerCamelCase(__lowercase))
203
0
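A minimal usage sketch for the circle sort above, reusing the `_lowerCamelCase` entry point from the snippet; the sample list is illustrative and the expected result is simply Python's built-in `sorted()`.

data = [6, 1, 5, 2, 4, 3]
assert _lowerCamelCase(list(data)) == sorted(data)  # -> [1, 2, 3, 4, 5, 6]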
"""simple docstring""" from __future__ import annotations from collections.abc import Callable UpperCamelCase_ : Tuple = list[list[float | int]] def __lowercase ( a : Matrix , a : Matrix ) -> Matrix: __snake_case : int =len(a ) __snake_case : Matrix =[[0 for _ in range(size + 1 )] for _ in range(a )] __snake_case : int __snake_case : int __snake_case : int __snake_case : int __snake_case : int __snake_case : float for row in range(a ): for col in range(a ): __snake_case : Optional[Any] =matrix[row][col] __snake_case : List[str] =vector[row][0] __snake_case : Optional[int] =0 __snake_case : Union[str, Any] =0 while row < size and col < size: # pivoting __snake_case : List[str] =max((abs(augmented[rowa][col] ), rowa) for rowa in range(a , a ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __snake_case : Optional[Any] =augmented[pivot_row], augmented[row] for rowa in range(row + 1 , a ): __snake_case : List[Any] =augmented[rowa][col] / augmented[row][col] __snake_case : List[str] =0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , a ): for row in range(a ): __snake_case : List[Any] =augmented[row][col] / augmented[col][col] for cola in range(a , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(a ) ] def __lowercase ( a : list[int] ) -> Callable[[int], int]: __snake_case : int =len(a ) __snake_case : Matrix =[[0 for _ in range(a )] for _ in range(a )] __snake_case : Matrix =[[0] for _ in range(a )] __snake_case : Matrix __snake_case : int __snake_case : int __snake_case : int for x_val, y_val in enumerate(a ): for col in range(a ): __snake_case : List[Any] =(x_val + 1) ** (size - col - 1) __snake_case : Optional[int] =y_val __snake_case : Dict =solve(a , a ) def interpolated_func(a : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(a ) ) return interpolated_func def __lowercase ( a : int ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __lowercase ( a : Callable[[int], int] = question_function , a : int = 10 ) -> int: __snake_case : list[int] =[func(a ) for x_val in range(1 , order + 1 )] __snake_case : list[Callable[[int], int]] =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __snake_case : int =0 __snake_case : Callable[[int], int] __snake_case : int for poly in polynomials: __snake_case : Optional[Any] =1 while func(a ) == poly(a ): x_val += 1 ret += poly(a ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
713
"""simple docstring""" import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowercase ( lowerCAmelCase , unittest.TestCase ): _a : Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _UpperCamelCase ( self : Dict , a : Optional[Any]=0 ): """simple docstring""" __snake_case : List[str] =np.random.RandomState(a ) __snake_case : Union[str, Any] ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _UpperCamelCase ( self : str ): """simple docstring""" __snake_case : Dict =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a ) __snake_case : Tuple =self.get_dummy_inputs() __snake_case : List[str] =pipe(**a ).images __snake_case : int =image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __snake_case : str =np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" __snake_case : Dict =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case : Dict =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a ) pipe.set_progress_bar_config(disable=a ) __snake_case : List[Any] =self.get_dummy_inputs() __snake_case : Dict =pipe(**a ).images __snake_case : Union[str, Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __snake_case : Optional[Any] =np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self : Optional[int] ): """simple docstring""" __snake_case : Any =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case : Any =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a ) __snake_case : int =self.get_dummy_inputs() __snake_case : List[str] =pipe(**a ).images __snake_case : str =image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __snake_case : int =np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self : List[str] ): """simple docstring""" __snake_case : Optional[int] =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case : Optional[Any] =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a ) __snake_case : str =self.get_dummy_inputs() __snake_case : int =pipe(**a ).images __snake_case : str =image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) 
__snake_case : Union[str, Any] =np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self : List[Any] ): """simple docstring""" __snake_case : Any =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case : List[str] =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a ) __snake_case : List[str] =self.get_dummy_inputs() __snake_case : Dict =pipe(**a ).images __snake_case : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __snake_case : Tuple =np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self : Dict ): """simple docstring""" __snake_case : Tuple =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case : Any =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a ) __snake_case : Tuple =self.get_dummy_inputs() __snake_case : Tuple =pipe(**a ).images __snake_case : Optional[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __snake_case : Dict =np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self : int ): """simple docstring""" __snake_case : Dict =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a ) __snake_case : Optional[int] =self.get_dummy_inputs() __snake_case : Any =3 * [inputs['''prompt''']] # forward __snake_case : Any =pipe(**a ) __snake_case : str =output.images[0, -3:, -3:, -1] __snake_case : Tuple =self.get_dummy_inputs() __snake_case : Any =3 * [inputs.pop('''prompt''' )] __snake_case : Optional[Any] =pipe.tokenizer( a , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=a , return_tensors='''np''' , ) __snake_case : List[Any] =text_inputs['''input_ids'''] __snake_case : str =pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __snake_case : Optional[Any] =prompt_embeds # forward __snake_case : Dict =pipe(**a ) __snake_case : Any =output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _UpperCamelCase ( self : Any ): """simple docstring""" __snake_case : Dict =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a ) __snake_case : List[Any] =self.get_dummy_inputs() __snake_case : Optional[Any] =3 * ['''this is a negative prompt'''] __snake_case : List[str] =negative_prompt __snake_case : str =3 * [inputs['''prompt''']] # forward __snake_case : int =pipe(**a ) __snake_case : Union[str, Any] =output.images[0, -3:, -3:, -1] __snake_case : Tuple =self.get_dummy_inputs() __snake_case : Union[str, Any] =3 * [inputs.pop('''prompt''' )] __snake_case : Optional[int] =[] for p in [prompt, negative_prompt]: __snake_case : Optional[int] =pipe.tokenizer( a , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=a , 
return_tensors='''np''' , ) __snake_case : Optional[Any] =text_inputs['''input_ids'''] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __snake_case , __snake_case : Optional[Any] =embeds # forward __snake_case : Any =pipe(**a ) __snake_case : Union[str, Any] =output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class _lowercase ( unittest.TestCase ): @property def _UpperCamelCase ( self : List[Any] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" __snake_case : List[str] =ort.SessionOptions() __snake_case : Optional[int] =False return options def _UpperCamelCase ( self : Any ): """simple docstring""" __snake_case : List[Any] =OnnxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=a ) __snake_case : List[str] ='''A painting of a squirrel eating a burger''' np.random.seed(0 ) __snake_case : Tuple =sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type='''np''' ) __snake_case : Union[str, Any] =output.images __snake_case : Optional[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __snake_case : Any =np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _UpperCamelCase ( self : Optional[Any] ): """simple docstring""" __snake_case : List[str] =DDIMScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) __snake_case : List[Any] =OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=a ) __snake_case : Optional[Any] ='''open neural network exchange''' __snake_case : Optional[int] =np.random.RandomState(0 ) __snake_case : int =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type='''np''' ) __snake_case : Union[str, Any] =output.images __snake_case : str =image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __snake_case : Union[str, Any] =np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _UpperCamelCase ( self : Tuple ): """simple docstring""" __snake_case : List[str] =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) __snake_case : int =OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=a ) __snake_case : Optional[int] ='''open neural network exchange''' __snake_case : Optional[Any] =np.random.RandomState(0 ) __snake_case : Any =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 
, generator=a , output_type='''np''' ) __snake_case : Optional[int] =output.images __snake_case : Any =image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __snake_case : Optional[int] =np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" __snake_case : Union[str, Any] =0 def test_callback_fn(a : int , a : int , a : np.ndarray ) -> None: __snake_case : Dict =True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 6_4, 6_4) __snake_case : Union[str, Any] =latents[0, -3:, -3:, -1] __snake_case : str =np.array( [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 6_4, 6_4) __snake_case : List[Any] =latents[0, -3:, -3:, -1] __snake_case : List[Any] =np.array( [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 __snake_case : str =False __snake_case : int =OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a ) __snake_case : List[Any] ='''Andromeda galaxy in a bottle''' __snake_case : Optional[int] =np.random.RandomState(0 ) pipe( prompt=a , num_inference_steps=5 , guidance_scale=7.5 , generator=a , callback=a , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _UpperCamelCase ( self : List[Any] ): """simple docstring""" __snake_case : Optional[Any] =OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(a , a ) assert pipe.safety_checker is None __snake_case : int =pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a ) __snake_case : List[Any] =OnnxStableDiffusionPipeline.from_pretrained(a ) # sanity check that the pipeline still works assert pipe.safety_checker is None __snake_case : Any =pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None
497
0
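A hand-checkable sketch for the Gaussian-elimination `solve` above (names as reconstructed): given a size x size coefficient matrix and a size x 1 right-hand side, it returns the solution column rounded to 10 decimals.

# 2x + y = 3 and x + 3y = 5 have the solution x = 0.8, y = 1.4
assert solve([[2.0, 1.0], [1.0, 3.0]], [[3.0], [5.0]]) == [[0.8], [1.4]]
assert solve([[1.0, 0.0], [0.0, 1.0]], [[3.0], [5.0]]) == [[3.0], [5.0]]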
'''simple docstring'''


def A (__lowerCamelCase: int = 100):
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, __lowerCamelCase + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(F"""{A() = }""")
5
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
66
0
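The point transform above reduces to `c + level`, so a mid-gray pixel at 128 with level 100 maps to 228. A minimal sketch on a synthetic image, assuming the corrected `change_brightness` above; the in-range result avoids any question of 8-bit clamping.

from PIL import Image

gray = Image.new("L", (2, 2), color=128)
brighter = change_brightness(gray, 100)
assert list(brighter.getdata()) == [228, 228, 228, 228]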
"""simple docstring""" def _lowerCamelCase( a ): __a = [0] * len(a ) for i in range(1 , len(a ) ): # use last results for better performance - dynamic programming __a = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: __a = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 __a = j return prefix_result def _lowerCamelCase( a ): return max(prefix_function(a ) ) if __name__ == "__main__": import doctest doctest.testmod()
67
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def _lowerCamelCase( a , a , a ): __a = OmegaConf.load(a ) __a = torch.load(a , map_location="cpu" )["model"] __a = list(state_dict.keys() ) # extract state_dict for VQVAE __a = {} __a = "first_stage_model." for key in keys: if key.startswith(a ): __a = state_dict[key] # extract state_dict for UNetLDM __a = {} __a = "model.diffusion_model." for key in keys: if key.startswith(a ): __a = state_dict[key] __a = config.model.params.first_stage_config.params __a = config.model.params.unet_config.params __a = VQModel(**a ).eval() vqvae.load_state_dict(a ) __a = UNetLDMModel(**a ).eval() unet.load_state_dict(a ) __a = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , ) __a = LDMPipeline(a , a , a ) pipeline.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
67
1
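For the prefix function above, `pi[i]` is the length of the longest proper prefix of `s[:i + 1]` that is also its suffix; a small sketch, including the classic KMP trick of searching a pattern via `pat + "#" + text` (this assumes `"#"` occurs in neither string).

assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3

pat, text = "ab", "abcab"
pi = prefix_function(pat + "#" + text)
# a full match ends wherever pi reaches len(pat); shift back to a start index
hits = [i - 2 * len(pat) for i in range(len(pat) + 1, len(pi)) if pi[i] == len(pat)]
assert hits == [0, 3]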
def UpperCAmelCase_ (number: int) -> int:
    if not isinstance(number, int):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
84
"""simple docstring""" import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): __lowerCamelCase : List[str] = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : Union[str, Any] = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : List[str] = use_input_mask __lowerCamelCase : Dict = use_token_type_ids __lowerCamelCase : Dict = use_labels __lowerCamelCase : Union[str, Any] = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : int = num_attention_heads __lowerCamelCase : int = intermediate_size __lowerCamelCase : Dict = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Any = attention_probs_dropout_prob __lowerCamelCase : List[str] = max_position_embeddings __lowerCamelCase : Tuple = type_vocab_size __lowerCamelCase : List[Any] = type_sequence_label_size __lowerCamelCase : Optional[Any] = initializer_range __lowerCamelCase : Any = num_labels __lowerCamelCase : Union[str, Any] = num_choices __lowerCamelCase : int = relative_attention __lowerCamelCase : Tuple = position_biased_input __lowerCamelCase : int = pos_att_type __lowerCamelCase : Dict = scope def snake_case_ ( self ): __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase : Dict = None if self.use_input_mask: __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowerCamelCase : Dict = None if self.use_token_type_ids: __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase : Any = None __lowerCamelCase : Optional[int] = None __lowerCamelCase : Union[str, Any] = None if self.use_labels: __lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ ( self ): return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def snake_case_ ( self , __a ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[int] = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : int = model(__a , attention_mask=__a , token_type_ids=__a )[0] __lowerCamelCase : Optional[int] = model(__a , token_type_ids=__a )[0] __lowerCamelCase : Optional[Any] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : List[str] = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : List[str] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : str = self.num_labels __lowerCamelCase : Tuple = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __lowerCamelCase : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[Any] = self.num_labels __lowerCamelCase : List[str] = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[int] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : List[str] = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[Any] = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : str = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case_ ( self ): __lowerCamelCase : Tuple = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 
'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' __a : str = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) __a : Optional[int] = ( { 'feature-extraction': DebertaVaModel, 'fill-mask': DebertaVaForMaskedLM, 'question-answering': DebertaVaForQuestionAnswering, 'text-classification': DebertaVaForSequenceClassification, 'token-classification': DebertaVaForTokenClassification, 'zero-shot': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) __a : Tuple = True __a : List[Any] = False __a : Any = False __a : Tuple = False __a : Tuple = False def snake_case_ ( self ): __lowerCamelCase : Optional[int] = DebertaVaModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a , hidden_size=37 ) def snake_case_ ( self ): self.config_tester.run_common_tests() def snake_case_ ( self ): __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def snake_case_ ( self ): __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def snake_case_ ( self ): __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def snake_case_ ( self ): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def snake_case_ ( self ): __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def snake_case_ ( self ): __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def snake_case_ ( self ): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : List[str] = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __lowercase( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def snake_case_ ( self ): pass @slow def snake_case_ ( self ): __lowerCamelCase : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __lowerCamelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __lowerCamelCase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __lowerCamelCase : Optional[int] = torch.tensor( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
594
0
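The loop above is the Catalan recurrence C(n) = C(n - 1) * (4n - 2) / (n + 1); a quick check of the first terms, assuming the fixed `UpperCAmelCase_` above.

assert [UpperCAmelCase_(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]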
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer A : List[str] = logging.get_logger(__name__) A : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} A : Union[str, Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Optional[int] = { "allenai/led-base-16384": 16384, } class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = LEDTokenizer A__ = ["input_ids", "attention_mask"] def __init__( self : List[Any] , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="replace" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : int=True , **__lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) lowerCamelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: lowerCamelCase__ : List[str] = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) lowerCamelCase__ : Tuple = add_prefix_space lowerCamelCase__ : str = pre_tok_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowerCamelCase__ : Dict = "post_processor" lowerCamelCase__ : List[Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: lowerCamelCase__ : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCamelCase__ : List[str] = tuple(state["sep"] ) if "cls" in state: lowerCamelCase__ : List[str] = tuple(state["cls"] ) lowerCamelCase__ : str = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: lowerCamelCase__ : Any = add_prefix_space lowerCamelCase__ : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: lowerCamelCase__ : int = trim_offsets lowerCamelCase__ : Any = True if changes_to_apply: lowerCamelCase__ : Union[str, Any] = getattr(__lowerCamelCase 
, state.pop("type" ) ) lowerCamelCase__ : Union[str, Any] = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value lowerCamelCase__ : Optional[Any] = value def lowerCAmelCase ( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' lowerCamelCase__ : Dict = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def lowerCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=None ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = [self.sep_token_id] lowerCamelCase__ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : Optional[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Any = 
encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : int = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Dict = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
5
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
5
1
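A small sketch of the `global_attention_mask` padding rule from the `_pad` override above: LED pads that mask with -1 rather than 0, because 0 already means "local attention" instead of "do not attend". The toy lists are illustrative.

encoded = {"input_ids": [0, 713, 2, 1, 1], "global_attention_mask": [1, 0, 0]}
difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
# right-side padding, as in the padding_side == "right" branch
encoded["global_attention_mask"] += [-1] * difference
assert encoded["global_attention_mask"] == [1, 0, 0, -1, -1]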
"""simple docstring""" def __snake_case ( _lowercase ): """simple docstring""" UpperCamelCase = int(_lowercase ) if n_element < 1: UpperCamelCase = ValueError('''a should be a positive number''' ) raise my_error UpperCamelCase = [1] UpperCamelCase , UpperCamelCase , UpperCamelCase = (0, 0, 0) UpperCamelCase = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 ,hamming_list[j] * 3 ,hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = input('Enter the last number (nth term) of the Hamming Number Series: ') print('Formula of Hamming Number Series => 2^i * 3^j * 5^k') SCREAMING_SNAKE_CASE_ = hamming(int(n)) print('-----------------------------------------------------') print(f'The list with nth numbers is: {hamming_numbers}') print('-----------------------------------------------------')
34
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # sum of moments must vanish for static equilibrium
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
73
0
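A quick check of the generator above: the first ten Hamming (5-smooth) numbers, assuming the fixed `hamming` entry point.

assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]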
'''simple docstring'''
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
710
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ : Tuple = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } lowerCAmelCase_ : Dict = { "gpt2": 1_024, "gpt2-medium": 1_024, "gpt2-large": 1_024, "gpt2-xl": 1_024, "distilgpt2": 1_024, } class lowercase ( __lowerCamelCase ): lowerCamelCase_ =VOCAB_FILES_NAMES lowerCamelCase_ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ =['input_ids', 'attention_mask'] lowerCamelCase_ =GPTaTokenizer def __init__( self : Dict , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any="<|endoftext|>" , __lowerCAmelCase : Union[str, Any]="<|endoftext|>" , __lowerCAmelCase : List[Any]="<|endoftext|>" , __lowerCAmelCase : Optional[Any]=False , **__lowerCAmelCase : Dict , ) -> int: super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , **__lowerCAmelCase , ) lowercase_ = kwargs.pop("add_bos_token" , __lowerCAmelCase) lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space" , __lowerCAmelCase) != add_prefix_space: lowercase_ = getattr(__lowerCAmelCase , pre_tok_state.pop("type")) lowercase_ = add_prefix_space lowercase_ = pre_tok_class(**__lowerCAmelCase) lowercase_ = add_prefix_space def __UpperCAmelCase ( self : List[str] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str) -> BatchEncoding: lowercase_ = kwargs.get("is_split_into_words" , __lowerCAmelCase) assert self.add_prefix_space or not is_split_into_words, ( F'You need to 
instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase) def __UpperCAmelCase ( self : List[Any] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Union[str, Any]) -> BatchEncoding: lowercase_ = kwargs.get("is_split_into_words" , __lowerCAmelCase) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase) def __UpperCAmelCase ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None) -> Tuple[str]: lowercase_ = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase) return tuple(__lowerCAmelCase) def __UpperCAmelCase ( self : Tuple , __lowerCAmelCase : "Conversation") -> List[int]: lowercase_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) + [self.eos_token_id]) if len(__lowerCAmelCase) > self.model_max_length: lowercase_ = input_ids[-self.model_max_length :] return input_ids
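# Hedged usage sketch: the class above mirrors transformers' GPT2TokenizerFast,
# which is normally loaded from the Hub rather than constructed by hand
# (network access and the public "gpt2" checkpoint are assumed here).
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
encoding = tokenizer("Hello world")
print(encoding.input_ids)                    # BPE token ids
print(tokenizer.decode(encoding.input_ids))  # round-trips to "Hello world"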
461
0
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A , unittest.TestCase ): lowerCAmelCase_ = LEDTokenizer lowerCAmelCase_ = LEDTokenizerFast lowerCAmelCase_ = True def lowerCamelCase_ ( self : Dict ): super().setUp() _lowerCamelCase : Tuple = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] _lowerCamelCase : Union[str, Any] = dict(zip(__A,range(len(__A ) ) ) ) _lowerCamelCase : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _lowerCamelCase : Dict = {"unk_token": "<unk>"} _lowerCamelCase : Any = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file,"w",encoding="utf-8" ) as fp: fp.write(json.dumps(__A ) + "\n" ) with open(self.merges_file,"w",encoding="utf-8" ) as fp: fp.write("\n".join(__A ) ) def lowerCamelCase_ ( self : str,**__A : str ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname,**__A ) def lowerCamelCase_ ( self : Dict,**__A : Dict ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**__A ) def lowerCamelCase_ ( self : Any,__A : str ): return "lower newer", "lower newer" @cached_property def lowerCamelCase_ ( self : Optional[int] ): return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def lowerCamelCase_ ( self : Optional[int] ): return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def lowerCamelCase_ ( self : List[Any] ): _lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] _lowerCamelCase : List[str] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Any = tokenizer(__A,max_length=len(__A ),padding=__A,return_tensors="pt" ) self.assertIsInstance(__A,__A ) self.assertEqual((2, 9),batch.input_ids.shape ) self.assertEqual((2, 9),batch.attention_mask.shape ) _lowerCamelCase : Dict = batch.input_ids.tolist()[0] self.assertListEqual(__A,__A ) @require_torch def lowerCamelCase_ ( self : Any ): _lowerCamelCase : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Optional[int] = tokenizer(__A,padding=__A,return_tensors="pt" ) self.assertIn("input_ids",__A ) self.assertIn("attention_mask",__A ) self.assertNotIn("labels",__A ) self.assertNotIn("decoder_attention_mask",__A ) @require_torch def lowerCamelCase_ ( self : str ): _lowerCamelCase : Dict = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Optional[int] = tokenizer(text_target=__A,max_length=3_2,padding="max_length",return_tensors="pt" ) self.assertEqual(3_2,targets["input_ids"].shape[1] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): for 
tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Tuple = tokenizer( ["I am a small frog" * 1_0_2_4, "I am a small frog"],padding=__A,truncation=__A,return_tensors="pt" ) self.assertIsInstance(__A,__A ) self.assertEqual(batch.input_ids.shape,(2, 5_1_2_2) ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): _lowerCamelCase : Optional[Any] = ["A long paragraph for summarization."] _lowerCamelCase : Any = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : int = tokenizer(__A,return_tensors="pt" ) _lowerCamelCase : str = tokenizer(text_target=__A,return_tensors="pt" ) _lowerCamelCase : Union[str, Any] = inputs["input_ids"] _lowerCamelCase : List[Any] = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Union[str, Any] = ["Summary of the text.", "Another summary."] _lowerCamelCase : Union[str, Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowerCamelCase : Optional[Any] = tokenizer(__A,padding=__A ) _lowerCamelCase : Optional[int] = [[0] * len(__A ) for x in encoded_output["input_ids"]] _lowerCamelCase : Any = tokenizer.pad(__A ) self.assertSequenceEqual(outputs["global_attention_mask"],__A ) def lowerCamelCase_ ( self : Any ): pass def lowerCamelCase_ ( self : str ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__A,**__A ) _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(__A,**__A ) _lowerCamelCase : Union[str, Any] = "A, <mask> AllenNLP sentence." _lowerCamelCase : Dict = tokenizer_r.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A ) _lowerCamelCase : int = tokenizer_p.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A ) self.assertEqual(sum(tokens_r["token_type_ids"] ),sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ),sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ),) _lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) _lowerCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( __A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
44
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) UpperCAmelCase_ : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase__ : lowerCAmelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCAmelCase_ = field( default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCAmelCase_ = field( default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCAmelCase_ = field( default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether tp freeze the encoder.'} ) lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class UpperCAmelCase__ : lowerCAmelCase_ = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) lowerCAmelCase_ = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) lowerCAmelCase_ = field( default=1024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCAmelCase_ = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCAmelCase_ = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) lowerCAmelCase_ = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} ) lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} ) lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} ) lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} ) lowerCAmelCase_ = field( default=A , metadata={'help': 'If only pad tokens should be ignored. 
This assumes that `config.pad_token_id` is defined.'} , ) def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ): """simple docstring""" logger.info(F'***** {split} metrics *****' ) for key in sorted(metrics.keys() ): logger.info(F' {key} = {metrics[key]}' ) save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) ) def A_ ( ): """simple docstring""" _lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses() check_output_dir(_lowerCAmelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , _lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute' setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) ) _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCamelCase : List[Any] = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCamelCase : int = SeqaSeqDataset # Get datasets _lowerCamelCase : Tuple = ( dataset_class( _lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) _lowerCamelCase : List[Any] = ( dataset_class( _lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCamelCase : Optional[int] = ( dataset_class( _lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCamelCase : int = ( build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None ) _lowerCamelCase : List[Any] = SeqaSeqTrainer( model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator( _lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , ) _lowerCamelCase : Optional[Any] = {} # Training if training_args.do_train: logger.info("*** Train ***" ) 
_lowerCamelCase : Optional[Any] = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowerCamelCase : int = train_result.metrics _lowerCamelCase : Optional[int] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , _lowerCAmelCase , training_args.output_dir ) all_metrics.update(_lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" ) _lowerCamelCase : Dict = data_args.n_val _lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , _lowerCAmelCase , training_args.output_dir ) all_metrics.update(_lowerCAmelCase ) if training_args.do_predict: logger.info("*** Predict ***" ) _lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" ) _lowerCamelCase : Dict = test_output.metrics _lowerCamelCase : Optional[int] = data_args.n_test if trainer.is_world_process_zero(): _lowerCamelCase : int = round(metrics["test_loss"] , 4 ) handle_metrics("test" , _lowerCAmelCase , training_args.output_dir ) all_metrics.update(_lowerCAmelCase ) if training_args.predict_with_generate: _lowerCamelCase : List[str] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) _lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase ) write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) ) return all_metrics def A_ ( _lowerCAmelCase : int ): """simple docstring""" main() if __name__ == "__main__": main()
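# Minimal sketch of the HfArgumentParser pattern the training script above is
# built on; the Args dataclass here is a hypothetical stand-in for the real
# DataTrainingArguments.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class Args:
    data_dir: str = field(metadata={"help": "The input data dir."})
    n_train: int = field(default=-1, metadata={"help": "# training examples. -1 means use all."})


parser = HfArgumentParser(Args)
(args,) = parser.parse_args_into_dataclasses(["--data_dir", "path/to/data"])
print(args.data_dir, args.n_train)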
44
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowerCamelCase ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: torch.manual_seed(0 ) UpperCamelCase__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def SCREAMING_SNAKE_CASE__ ( self ) -> str: UpperCamelCase__ = self.dummy_uncond_unet UpperCamelCase__ = PNDMScheduler() UpperCamelCase__ = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_ ) pndm.to(snake_case_ ) pndm.set_progress_bar_config(disable=snake_case_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pndm(generator=snake_case_ , num_inference_steps=20 , output_type='numpy' ).images UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pndm(generator=snake_case_ , num_inference_steps=20 , output_type='numpy' , return_dict=snake_case_ )[0] UpperCamelCase__ = image[0, -3:, -3:, -1] UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __lowerCamelCase ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: UpperCamelCase__ = 'google/ddpm-cifar10-32' UpperCamelCase__ = UNetaDModel.from_pretrained(snake_case_ ) UpperCamelCase__ = PNDMScheduler() UpperCamelCase__ = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_ ) pndm.to(snake_case_ ) pndm.set_progress_bar_config(disable=snake_case_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pndm(generator=snake_case_ , output_type='numpy' ).images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase__ = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
710
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A__ : Any= logging.get_logger(__name__) A__ : str= { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class __lowerCamelCase ( _a ): a : List[str] ="""layoutlmv3""" def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]: super().__init__( vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , ) UpperCamelCase__ = max_ad_position_embeddings UpperCamelCase__ = coordinate_size UpperCamelCase__ = shape_size UpperCamelCase__ = has_relative_attention_bias UpperCamelCase__ = rel_pos_bins UpperCamelCase__ = max_rel_pos UpperCamelCase__ = has_spatial_attention_bias UpperCamelCase__ = rel_ad_pos_bins UpperCamelCase__ = max_rel_ad_pos UpperCamelCase__ = text_embed UpperCamelCase__ = visual_embed UpperCamelCase__ = input_size UpperCamelCase__ = num_channels UpperCamelCase__ = patch_size UpperCamelCase__ = classifier_dropout class __lowerCamelCase ( _a ): a : Tuple =version.parse("""1.12""" ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> float: return 1E-5 @property def SCREAMING_SNAKE_CASE__ ( self ) -> int: return 12 def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]: setattr(processor.image_processor , 'apply_ocr' , snake_case_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX 
UpperCamelCase__ = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ ) UpperCamelCase__ = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) UpperCamelCase__ = dict( processor( snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) ) return inputs
20
0
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function a_ : Any = 1.0_5457_1817e-34 # unit of ℏ : J * s a_ : List[Any] = 3e8 # unit of c : m * s^-1 def UpperCAmelCase ( A__: float , A__: float , A__: float ) -> dict[str, float]: if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: __lowerCamelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: __lowerCamelCase : Any = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __lowerCamelCase : List[Any] = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
594
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def UpperCAmelCase ( A__: int , A__: str , A__: List[Any]=None , A__: Dict=None ) -> List[str]: if attention_mask is None: __lowerCamelCase : int = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class __lowercase: '''simple docstring''' __a : Any = OPTConfig __a : Union[str, Any] = {} __a : Any = 'gelu' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=16 , __a=2 , __a=4 , __a=4 , __a="gelu" , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , __a=16 , __a=16 , ): __lowerCamelCase : Dict = parent __lowerCamelCase : List[str] = batch_size __lowerCamelCase : Tuple = seq_length __lowerCamelCase : int = is_training __lowerCamelCase : Optional[int] = use_labels __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : Any = hidden_size __lowerCamelCase : Tuple = num_hidden_layers __lowerCamelCase : Optional[int] = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : int = hidden_dropout_prob __lowerCamelCase : List[Any] = attention_probs_dropout_prob __lowerCamelCase : Dict = max_position_embeddings __lowerCamelCase : str = eos_token_id __lowerCamelCase : int = pad_token_id __lowerCamelCase : Union[str, Any] = bos_token_id __lowerCamelCase : Dict = embed_dim __lowerCamelCase : Tuple = word_embed_proj_dim __lowerCamelCase : Any = False def snake_case_ ( self ): __lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowerCamelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowerCamelCase : Any = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__a , **self.config_updates , ) __lowerCamelCase : int = prepare_opt_inputs_dict(__a , __a ) return config, inputs_dict def snake_case_ ( self , __a , __a ): __lowerCamelCase : Optional[int] = TFOPTModel(config=__a ) __lowerCamelCase : Dict = inputs_dict['input_ids'] __lowerCamelCase : List[Any] = input_ids[:1, :] __lowerCamelCase : Optional[int] = inputs_dict['attention_mask'][:1, :] __lowerCamelCase : Any = 1 # first forward pass __lowerCamelCase : int = model(__a , attention_mask=__a , use_cache=__a ) __lowerCamelCase , __lowerCamelCase : Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) 
__lowerCamelCase : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowerCamelCase : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowerCamelCase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowerCamelCase : Dict = model(__a , attention_mask=__a )[0] __lowerCamelCase : str = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowerCamelCase : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx] __lowerCamelCase : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1E-3 ) @require_tf class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' __a : List[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __a : List[Any] = (TFOPTForCausalLM,) if is_tf_available() else () __a : List[str] = ( {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {} ) __a : List[Any] = False __a : Dict = False __a : Dict = False __a : int = 10 def snake_case_ ( self ): __lowerCamelCase : Optional[int] = TFOPTModelTester(self ) __lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a ) def snake_case_ ( self ): self.config_tester.run_common_tests() def snake_case_ ( self ): __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) def snake_case_ ( self ): __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(__a , __a ): if hasattr(__a , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(__a , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings __lowerCamelCase : Any = model_class(config=__a ) __lowerCamelCase : Union[str, Any] = _get_word_embedding_weight(__a , model.get_input_embeddings() ) __lowerCamelCase : Any = _get_word_embedding_weight(__a , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(__a ) __lowerCamelCase : int = _get_word_embedding_weight(__a , model.get_input_embeddings() ) __lowerCamelCase : Tuple = _get_word_embedding_weight(__a , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
__lowerCamelCase : Tuple = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , __a ) # check that weights remain the same after resizing __lowerCamelCase : List[Any] = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: __lowerCamelCase : str = False self.assertTrue(__a ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , __a ) __lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: __lowerCamelCase : Tuple = False self.assertTrue(__a ) def UpperCAmelCase ( A__: Tuple ) -> Dict: return tf.constant(A__ , dtype=tf.intaa ) @require_tf class __lowercase( unittest.TestCase ): '''simple docstring''' __a : str = 99 def snake_case_ ( self ): __lowerCamelCase : str = tf.ones((4, 1) , dtype=tf.intaa ) * 2 __lowerCamelCase : int = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) __lowerCamelCase : int = input_ids.shape[0] __lowerCamelCase : Optional[int] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class __lowercase( unittest.TestCase ): '''simple docstring''' @slow def snake_case_ ( self ): __lowerCamelCase : str = TFOPTModel.from_pretrained('facebook/opt-350m' ) __lowerCamelCase : Dict = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __lowerCamelCase : List[Any] = tf.not_equal(__a , model.config.pad_token_id ) with tf.GradientTape(): __lowerCamelCase : Dict = model(input_ids=__a , attention_mask=__a ).last_hidden_state __lowerCamelCase : List[Any] = (1, 11, 512) self.assertEqual(output.shape , __a ) __lowerCamelCase : str = tf.constant( [[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] ) self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4E-3 ) ) __lowerCamelCase : int = tf.function(__a , jit_compile=__a ) __lowerCamelCase : Optional[int] = xla_generate(__a , __a )[0] self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4E-2 ) ) @require_tf @slow class __lowercase( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self ): super().setUp() __lowerCamelCase : str = 'facebook/opt-350m' def snake_case_ ( self ): __lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) __lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) __lowerCamelCase : Optional[int] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False __lowerCamelCase : str = tokenizer(__a , return_tensors='tf' , padding=__a , add_special_tokens=__a ) __lowerCamelCase : Tuple = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) __lowerCamelCase : List[str] = tf.constant( [ [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670], [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822], [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 
1.4_146, -9.0_218, -0.2_703, -0.2_703], [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477], ] ) self.assertTrue(np.allclose(__a , __a , atol=1E-4 ) ) __lowerCamelCase : Union[str, Any] = tf.function(__a , jit_compile=__a ) __lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(__a , __a , atol=1E-4 ) ) @require_tf @slow class __lowercase( unittest.TestCase ): '''simple docstring''' @property def snake_case_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def snake_case_ ( self ): __lowerCamelCase : List[str] = 'facebook/opt-125m' __lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] __lowerCamelCase : Tuple = [] __lowerCamelCase : str = GPTaTokenizer.from_pretrained(__a ) __lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(__a ) for prompt in self.prompts: __lowerCamelCase : Dict = tokenizer(__a , return_tensors='tf' ).input_ids __lowerCamelCase : int = model.generate(__a , max_length=10 ) __lowerCamelCase : List[Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a ) predicted_outputs += generated_string self.assertListEqual(__a , __a ) def snake_case_ ( self ): __lowerCamelCase : int = 'facebook/opt-350m' __lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(__a ) __lowerCamelCase : str = TFOPTForCausalLM.from_pretrained(__a ) __lowerCamelCase : Optional[int] = 'left' # use different length sentences to test batching __lowerCamelCase : List[Any] = [ 'Hello, my dog is a little', 'Today, I', ] __lowerCamelCase : Optional[int] = tokenizer(__a , return_tensors='tf' , padding=__a ) __lowerCamelCase : Tuple = inputs['input_ids'] __lowerCamelCase : Optional[Any] = model.generate(input_ids=__a , attention_mask=inputs['attention_mask'] ) __lowerCamelCase : Any = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowerCamelCase : Optional[Any] = model.generate(input_ids=__a ) __lowerCamelCase : List[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) __lowerCamelCase : Union[str, Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowerCamelCase : Any = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings ) __lowerCamelCase : Optional[int] = tokenizer.batch_decode(__a , skip_special_tokens=__a ) __lowerCamelCase : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a ) __lowerCamelCase : Any = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , [non_padded_sentence, padded_sentence] ) def snake_case_ ( self ): __lowerCamelCase : Any = 'facebook/opt-350m' __lowerCamelCase : str = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] __lowerCamelCase : int = [] __lowerCamelCase : Tuple = GPTaTokenizer.from_pretrained(__a ) __lowerCamelCase : List[Any] = 
TFOPTForCausalLM.from_pretrained(__a ) for prompt in self.prompts: __lowerCamelCase : Optional[Any] = tokenizer(__a , return_tensors='tf' ).input_ids __lowerCamelCase : List[Any] = model.generate(__a , max_length=10 ) __lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a ) predicted_outputs += generated_string self.assertListEqual(__a , __a )
594
1
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class _A ( UpperCAmelCase_ ): def __init__( self : Optional[Any] , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : DDPMScheduler , lowerCamelCase__ : Any , ): """simple docstring""" super().__init__() __UpperCamelCase : Tuple = value_function __UpperCamelCase : List[str] = unet __UpperCamelCase : int = scheduler __UpperCamelCase : Optional[Any] = env __UpperCamelCase : Union[str, Any] = env.get_dataset() __UpperCamelCase : List[str] = {} for key in self.data.keys(): try: __UpperCamelCase : Union[str, Any] = self.data[key].mean() except: # noqa: E722 pass __UpperCamelCase : Dict = {} for key in self.data.keys(): try: __UpperCamelCase : List[Any] = self.data[key].std() except: # noqa: E722 pass __UpperCamelCase : Any = env.observation_space.shape[0] __UpperCamelCase : Tuple = env.action_space.shape[0] def a ( self : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : int ): """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def a ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] ): """simple docstring""" return x_in * self.stds[key] + self.means[key] def a ( self : List[str] , lowerCamelCase__ : Tuple ): """simple docstring""" if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def a ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ): """simple docstring""" for key, val in cond.items(): __UpperCamelCase : int = val.clone() return x_in def a ( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ): """simple docstring""" __UpperCamelCase : int = x.shape[0] __UpperCamelCase : List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model __UpperCamelCase : Dict = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models __UpperCamelCase : List[str] = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample __UpperCamelCase : int = torch.autograd.grad([y.sum()] , [x] )[0] __UpperCamelCase : Any = self.scheduler._get_variance(lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] = torch.exp(0.5 * posterior_variance ) __UpperCamelCase : Tuple = model_std * grad __UpperCamelCase : Any = 0 __UpperCamelCase : List[Any] = x.detach() __UpperCamelCase : List[Any] = x + scale * grad __UpperCamelCase : Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) __UpperCamelCase : str = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg __UpperCamelCase : List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) __UpperCamelCase : int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , 
self.action_dim ) __UpperCamelCase : Tuple = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=64 , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[str]=0.1 ): """simple docstring""" __UpperCamelCase : int = self.normalize(lowerCamelCase__ , """observations""" ) __UpperCamelCase : Dict = obs[None].repeat(lowerCamelCase__ , axis=0 ) __UpperCamelCase : Any = {0: self.to_torch(lowerCamelCase__ )} __UpperCamelCase : str = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) __UpperCamelCase : Optional[Any] = randn_tensor(lowerCamelCase__ , device=self.unet.device ) __UpperCamelCase : Any = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) __UpperCamelCase : List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process __UpperCamelCase , __UpperCamelCase : Any = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value __UpperCamelCase : str = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() __UpperCamelCase : List[str] = x[sorted_idx] __UpperCamelCase : int = sorted_values[:, :, : self.action_dim] __UpperCamelCase : List[Any] = actions.detach().cpu().numpy() __UpperCamelCase : Dict = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: __UpperCamelCase : List[str] = 0 else: # if we didn't run value guiding, select a random action __UpperCamelCase : str = np.random.randint(0 , lowerCamelCase__ ) __UpperCamelCase : Any = denorm_actions[selected_index, 0] return denorm_actions
701
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
515
0
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator returning how long `func` took to run instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Build `num_examples` rows of random dummy data matching `features`."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write the dummy examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
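# A small usage sketch for generate_example_dataset above (assumes the restored
# names in this record); writes a tiny dummy dataset to a temporary Arrow file.
import tempfile

features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
with tempfile.TemporaryDirectory() as tmp_dir:
    dataset = generate_example_dataset(tmp_dir + "/dummy.arrow", features, num_examples=10)
    print(dataset)  # Dataset with features ['text', 'label'] and 10 rows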
64
"""simple docstring""" from scipy.stats import spearmanr import datasets A_ = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' A_ = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' A_ = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ), reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""], ) def UpperCamelCase_ ( self: Any, a_: Union[str, Any], a_: Union[str, Any], a_: str=False ): '''simple docstring''' _snake_case : Optional[Any] = spearmanr(a_, a_ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
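# The metric above delegates to scipy; this reproduces the docstring's first
# example with scipy.stats directly.
from scipy.stats import spearmanr

rho, p_value = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))  # -0.7, matching the documented example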
609
0
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 A = get_tests_dir('fixtures') class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ) -> Dict: _lowerCamelCase = mock.Mock() _lowerCamelCase = 5_0_0 _lowerCamelCase = {} _lowerCamelCase = HTTPError _lowerCamelCase = {} # Download this model to make sure it's in the cache. _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=a_ ) as mock_head: _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self : Dict ) -> List[Any]: _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def _snake_case ( cls : Union[str, Any] ) -> Union[str, Any]: _lowerCamelCase = TOKEN HfFolder.save_token(a_ ) @classmethod def _snake_case ( cls : Tuple ) -> Optional[int]: try: delete_repo(token=cls._token , repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def _snake_case ( self : List[str] ) -> Tuple: _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token ) _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) # Reset repo delete_repo(token=self._token , repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_ , repo_id='test-feature-extractor' , push_to_hub=a_ , use_auth_token=self._token ) _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) def _snake_case ( self : str ) -> int: _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token ) _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_ , repo_id='valid_org/test-feature-extractor-org' 
, push_to_hub=a_ , use_auth_token=self._token ) _lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) def _snake_case ( self : Dict ) -> List[Any]: CustomFeatureExtractor.register_for_auto_class() _lowerCamelCase = CustomFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , ) _lowerCamelCase = AutoFeatureExtractor.from_pretrained( f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=a_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
706
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 def __init__( self : int , snake_case__ : UNetaDModel , snake_case__ : ScoreSdeVeScheduler ) -> Dict: super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self : Any , snake_case__ : int = 1 , snake_case__ : int = 2_0_0_0 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , **snake_case__ : Tuple , ) -> Union[ImagePipelineOutput, Tuple]: _lowerCamelCase = self.unet.config.sample_size _lowerCamelCase = (batch_size, 3, img_size, img_size) _lowerCamelCase = self.unet _lowerCamelCase = randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma _lowerCamelCase = sample.to(self.device ) self.scheduler.set_timesteps(snake_case__ ) self.scheduler.set_sigmas(snake_case__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): _lowerCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): _lowerCamelCase = self.unet(snake_case__ , snake_case__ ).sample _lowerCamelCase = self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # prediction step _lowerCamelCase = model(snake_case__ , snake_case__ ).sample _lowerCamelCase = self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ) _lowerCamelCase , _lowerCamelCase = output.prev_sample, output.prev_sample_mean _lowerCamelCase = sample_mean.clamp(0 , 1 ) _lowerCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCamelCase = self.numpy_to_pil(snake_case__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case__ )
234
0
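# Aside: a minimal, self-contained sketch of the cached-fallback pattern exercised by the
# feature-extractor test at the top of this section. We simulate a Hub outage by patching
# requests.Session.request to return an HTTP 500 and check that a previously cached
# checkpoint still loads. The tiny model id comes from the test itself; the exact fallback
# behavior is an assumption about the library, not an official recipe.
import unittest.mock as mock

from requests.exceptions import HTTPError
from transformers import Wav2Vec2FeatureExtractor

# First load populates the local cache.
extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

# Fake a server-side failure for every subsequent HTTP request.
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    # The call should fall back to the cached files instead of raising.
    extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
mock_head.assert_called()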
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
460
'''simple docstring''' import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : str = RoFormerTokenizer lowerCAmelCase__ : Dict = RoFormerTokenizerFast lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : List[str] = True def UpperCamelCase__ (self : Dict ): '''simple docstring''' super().setUp() def UpperCamelCase__ (self : List[str] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **UpperCamelCase ) def UpperCamelCase__ (self : List[str] , **UpperCamelCase : int ): '''simple docstring''' return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = '''永和服装饰品有限公司,今天天气非常好''' lowercase__ = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好''' return input_text, output_text def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ ,lowercase__ = self.get_chinese_input_output_texts() lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , output_text.split() ) lowercase__ = tokens + [tokenizer.unk_token] lowercase__ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_rust_tokenizer() lowercase__ ,lowercase__ = self.get_chinese_input_output_texts() lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , output_text.split() ) lowercase__ = tokens + [tokenizer.unk_token] lowercase__ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : str ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' pass def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' pass
460
1
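# Aside: the double loop in convert_to_negative above inverts one pixel at a time.
# As an illustrative sketch of our own (not part of the original file), the same
# transform is a single vectorized NumPy expression, since a uint8 image inverts
# channel-wise:
import numpy as np

img = np.array([[[0, 10, 200], [255, 128, 3]]], dtype=np.uint8)  # tiny 1x2 "image"
negative = 255 - img  # equivalent to [255, 255, 255] - img[i][j] for every pixel
assert negative.tolist() == [[[255, 245, 55], [0, 127, 252]]]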
'''simple docstring''' import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class UpperCamelCase__ ( a , a , unittest.TestCase ): '''simple docstring''' _snake_case = IFImgaImgSuperResolutionPipeline _snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} _snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) _snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} def snake_case ( self ) -> Union[str, Any]: return self._get_superresolution_dummy_components() def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Tuple: if str(SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = floats_tensor((1, 3, 16, 16) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def snake_case ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def snake_case ( self ) -> Tuple: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def snake_case ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case ( self ) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case ( self ) -> Optional[int]: self._test_save_load_local() def snake_case ( self ) -> Any: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
123
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
123
1
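# Aside: a quick sanity check for the fill-count recurrence above, assuming the
# repaired `solution` from that snippet is in scope. Project Euler problem 114
# states that a row of length 7 admits exactly 17 arrangements of blocks of
# length >= 3 separated by at least one empty square, which the recurrence reproduces:
assert solution(7) == 17
print(solution(50))  # the full problem asks for length 50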
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
101
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
101
1
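# Aside: worked examples for the Counter-based check above, assuming the repaired
# function names from that snippet. A string can be rearranged into a palindrome
# iff at most one character has an odd count; case and spaces are ignored:
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True    # M, o each occur twice
assert can_string_be_rearranged_as_palindrome_counter("Mama") is True    # m, a each occur twice
assert can_string_be_rearranged_as_palindrome_counter("Mother") is False # six characters with odd counts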
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
678
0
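# Aside: a tiny end-to-end run of the reconstructed Prim implementation above.
# The graph layout (a 3-vertex triangle) and the expected MST edges are our own
# illustrative assumptions, not part of the original file. Vertices carry
# 0-based ids internally while connect() takes 1-based labels:
graph = [Vertex(i) for i in range(3)]  # nodes labeled 1, 2, 3
connect(graph, 1, 2, 1)                # edge 1-2, weight 1
connect(graph, 2, 3, 2)                # edge 2-3, weight 2
connect(graph, 1, 3, 10)               # edge 1-3, weight 10
mst_edges = prim(graph, graph[0])
print(mst_edges)  # expected: [(2, 1), (3, 2)] -- each vertex paired with its MST parent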
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging snake_case : Optional[int] = logging.get_logger(__name__) snake_case : str = '▁' snake_case : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} snake_case : str = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), } } snake_case : Tuple = { 'facebook/mbart-large-en-ro': 1_024, 'facebook/mbart-large-cc25': 1_024, } # fmt: off snake_case : List[str] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCamelCase__( snake_case_ ): UpperCamelCase : List[str] = VOCAB_FILES_NAMES UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Dict = ["input_ids", "attention_mask"] UpperCamelCase : List[int] = [] UpperCamelCase : List[int] = [] def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase=None , **__UpperCAmelCase , ): """simple docstring""" __lowercase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) __lowercase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase = 1 __lowercase = len(self.sp_model ) __lowercase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase ) } __lowercase = {v: k for k, v in self.lang_code_to_id.items()} __lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __lowercase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __lowercase = src_lang if src_lang is not None else """en_XX""" __lowercase = self.lang_code_to_id[self._src_lang] __lowercase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): """simple docstring""" __lowercase = self.__dict__.copy() __lowercase = None __lowercase = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ): """simple docstring""" __lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __lowercase = {} __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __magic_name__ ( self ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __magic_name__ ( self ): """simple docstring""" return self._src_lang @src_lang.setter def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) __lowercase = [1] * len(self.prefix_tokens ) __lowercase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` 
for this model""" ) __lowercase = src_lang __lowercase = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) __lowercase = self.convert_tokens_to_ids(__UpperCAmelCase ) __lowercase = tgt_lang_id return inputs def __magic_name__ ( self ): """simple docstring""" __lowercase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase = self.sp_model.PieceToId(__UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = """""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" if not os.path.isdir(__UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowercase = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = "en_XX" , __UpperCAmelCase = None , __UpperCAmelCase = "ro_RO" , **__UpperCAmelCase , ): """simple docstring""" __lowercase = src_lang __lowercase = tgt_lang return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__ ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def __magic_name__ ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = self.lang_code_to_id[src_lang] __lowercase = [] __lowercase = [self.eos_token_id, self.cur_lang_code] def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = self.lang_code_to_id[lang] __lowercase = [] __lowercase = [self.eos_token_id, self.cur_lang_code]
566
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case : Union[str, Any] = logging.get_logger(__name__) snake_case : Any = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } snake_case : Union[str, Any] = { 'b0': { 'hidden_dim': 1_280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1_280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1_408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1_536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1_792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2_048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2_304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2_560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowercase__ ( __UpperCamelCase : str ): '''simple docstring''' __lowercase = EfficientNetConfig() __lowercase = CONFIG_MAP[model_name]["""hidden_dim"""] __lowercase = CONFIG_MAP[model_name]["""width_coef"""] __lowercase = CONFIG_MAP[model_name]["""depth_coef"""] __lowercase = CONFIG_MAP[model_name]["""image_size"""] __lowercase = CONFIG_MAP[model_name]["""dropout_rate"""] __lowercase = CONFIG_MAP[model_name]["""dw_padding"""] __lowercase = """huggingface/label-files""" __lowercase = """imagenet-1k-id2label.json""" __lowercase = 1000 __lowercase = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __lowercase = {int(__UpperCamelCase ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} return config def lowercase__ ( ): '''simple docstring''' __lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im def lowercase__ ( __UpperCamelCase : Union[str, Any] ): '''simple docstring''' __lowercase = CONFIG_MAP[model_name]["""image_size"""] __lowercase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=__UpperCamelCase , ) return preprocessor def lowercase__ ( __UpperCamelCase : str ): '''simple docstring''' __lowercase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __lowercase = 
sorted(set(__UpperCamelCase ) ) __lowercase = len(__UpperCamelCase ) __lowercase = {b: str(__UpperCamelCase ) for b, i in zip(__UpperCamelCase , range(__UpperCamelCase ) )} __lowercase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __lowercase = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __lowercase = {} for item in rename_keys: if item[0] in original_param_names: __lowercase = """efficientnet.""" + item[1] __lowercase = """classifier.weight""" __lowercase = 
"""classifier.bias""" return key_mapping def lowercase__ ( __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ): '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __lowercase = key_mapping[key] if "_conv" in key and "kernel" in key: __lowercase = torch.from_numpy(__UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __lowercase = torch.from_numpy(__UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __lowercase = torch.from_numpy(np.transpose(__UpperCamelCase ) ) else: __lowercase = torch.from_numpy(__UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__UpperCamelCase ) @torch.no_grad() def lowercase__ ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str ): '''simple docstring''' __lowercase = model_classes[model_name]( include_top=__UpperCamelCase , weights="""imagenet""" , input_tensor=__UpperCamelCase , input_shape=__UpperCamelCase , pooling=__UpperCamelCase , classes=1000 , classifier_activation="""softmax""" , ) __lowercase = original_model.trainable_variables __lowercase = original_model.non_trainable_variables __lowercase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __lowercase = param.numpy() __lowercase = list(tf_params.keys() ) # Load HuggingFace model __lowercase = get_efficientnet_config(__UpperCamelCase ) __lowercase = EfficientNetForImageClassification(__UpperCamelCase ).eval() __lowercase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __lowercase = rename_keys(__UpperCamelCase ) replace_params(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Initialize preprocessor and preprocess input image __lowercase = convert_image_processor(__UpperCamelCase ) __lowercase = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __lowercase = hf_model(**__UpperCamelCase ) __lowercase = outputs.logits.detach().numpy() # Original model inference __lowercase = False __lowercase = CONFIG_MAP[model_name]["""image_size"""] __lowercase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __lowercase = image.img_to_array(__UpperCamelCase ) __lowercase = np.expand_dims(__UpperCamelCase , axis=0 ) __lowercase = original_model.predict(__UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(__UpperCamelCase ): os.mkdir(__UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(__UpperCamelCase ) preprocessor.save_pretrained(__UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) __lowercase = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(__UpperCamelCase ) hf_model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') snake_case : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
566
1
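# Aside: the replace_params helper in the EfficientNet conversion above hinges on one
# detail worth a standalone sketch: TensorFlow stores conv kernels as
# (H, W, in_channels, out_channels) while PyTorch expects (out_channels, in_channels, H, W).
# The shapes below are illustrative assumptions:
import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32).astype("float32")   # H, W, C_in, C_out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # C_out, C_in, H, W
assert pt_kernel.shape == (32, 16, 3, 3)

# Depthwise kernels are stored as (H, W, C, multiplier) and need a different
# permutation, matching the permute(2, 3, 0, 1) branch in the converter:
tf_dw = np.random.rand(3, 3, 16, 1).astype("float32")
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)          # C, multiplier, H, W
assert pt_dw.shape == (16, 1, 3, 3)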
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} _lowerCAmelCase = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } _lowerCAmelCase = { """abeja/gpt-neox-japanese-2.7b""": 2_0_4_8, } def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' with open(_lowerCamelCase , 'r' , encoding='utf-8' ) as f: _lowerCAmelCase : Tuple = json.loads(f.read() ) _lowerCAmelCase : Tuple = collections.OrderedDict() _lowerCAmelCase : List[str] = collections.OrderedDict() _lowerCAmelCase : Union[str, Any] = collections.OrderedDict() with open(_lowerCamelCase , 'r' , encoding='utf-8' ) as f: _lowerCAmelCase : Tuple = f.readlines() _lowerCAmelCase : int = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(_lowerCamelCase ): _lowerCAmelCase : Optional[int] = b _lowerCAmelCase : Dict = idx for wd in b: _lowerCAmelCase : Dict = idx return vocab, raw_vocab, ids_to_tokens, emoji class __UpperCamelCase ( a__ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self ,_A ,_A ,_A="<|endoftext|>" ,_A="<|endoftext|>" ,_A="<|startoftext|>" ,_A="<|endoftext|>" ,_A=False ,**_A ,): '''simple docstring''' super().__init__( unk_token=_A ,pad_token=_A ,bos_token=_A ,eos_token=_A ,do_clean_text=_A ,**_A ,) if not os.path.isfile(_A ): raise ValueError( F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(_A ): raise ValueError( F"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) _lowerCAmelCase : Tuple = do_clean_text _lowerCAmelCase : str = load_vocab_and_emoji(_A ,_A ) _lowerCAmelCase : List[Any] = SubWordJapaneseTokenizer( vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji ) @property def __lowerCamelCase ( self ): '''simple docstring''' return len(self.raw_vocab ) def __lowerCamelCase ( self ): '''simple docstring''' return dict(self.raw_vocab ,**self.added_tokens_encoder ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' return self.subword_tokenizer.tokenize(_A ,clean=self.do_clean_text ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' return self.vocab.get(_A ,self.vocab.get(self.unk_token ) ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(_A ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : List[str] = ''.join(_A ).strip() return out_string def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : int = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_A ,add_special_tokens=_A ) + [self.eos_token_id] ) if len(_A ) > self.model_max_length: _lowerCAmelCase : Optional[int] = input_ids[-self.model_max_length :] return input_ids def __lowerCamelCase ( self ,_A ,_A = None ): '''simple docstring''' _lowerCAmelCase : Tuple = 0 if os.path.isdir(_A ): _lowerCAmelCase : int = os.path.join( _A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : Optional[Any] = os.path.join( _A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: _lowerCAmelCase : Dict = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : List[Any] = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(_A ,'w' ,encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' 
) _lowerCAmelCase : Optional[int] = token_index writer.write(','.join(_A ) + '\n' ) index += 1 with open(_A ,'w' ,encoding='utf-8' ) as writer: json.dump(self.emoji ,_A ) return vocab_file, emoji_file class __UpperCamelCase ( a__ ): def __init__( self ,_A ,_A ,_A ): '''simple docstring''' _lowerCAmelCase : Any = vocab # same as swe _lowerCAmelCase : int = ids_to_tokens # same as bpe _lowerCAmelCase : List[Any] = emoji _lowerCAmelCase : Tuple = np.max([len(_A ) for w in self.vocab.keys()] ) _lowerCAmelCase : Dict = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) _lowerCAmelCase : int = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) _lowerCAmelCase : Optional[int] = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) _lowerCAmelCase : Optional[int] = re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) _lowerCAmelCase : Optional[int] = re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) _lowerCAmelCase : Dict = re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) _lowerCAmelCase : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' _lowerCAmelCase : List[str] = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' _lowerCAmelCase : Union[str, Any] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self ): '''simple docstring''' return len(self.ids_to_tokens ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.content_repattera.sub('<URL>' ,_A ) _lowerCAmelCase : int = self.content_repattera.sub('<EMAIL>' ,_A ) _lowerCAmelCase : List[Any] = self.content_repattera.sub('<TEL>' ,_A ) _lowerCAmelCase : str = self.content_repattera.sub('<DATE>' ,_A ) _lowerCAmelCase : Union[str, Any] = self.content_repattera.sub('<DATE>' ,_A ) _lowerCAmelCase : Any = self.content_repattera.sub('<PRICE>' ,_A ) _lowerCAmelCase : Union[str, Any] = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: _lowerCAmelCase : int = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' ) return content def __lowerCamelCase ( self ,_A ,_A=False ): '''simple docstring''' _lowerCAmelCase : Tuple = text.replace(' ' ,'<SP>' ) _lowerCAmelCase : Optional[Any] = text.replace(' ' ,'<SP>' ) _lowerCAmelCase : List[str] = text.replace('\r\n' ,'<BR>' ) _lowerCAmelCase : Dict = text.replace('\n' ,'<BR>' ) _lowerCAmelCase : int = text.replace('\r' ,'<BR>' ) _lowerCAmelCase : int = text.replace('\t' ,'<TAB>' ) _lowerCAmelCase : Optional[Any] = text.replace('—' ,'ー' ) _lowerCAmelCase : List[str] = text.replace('−' ,'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: _lowerCAmelCase : List[Any] = text.replace(_A ,_A ) if clean: _lowerCAmelCase : Any = self.clean_text(_A ) def check_simbol(_A ): _lowerCAmelCase : Tuple = x.encode() if len(_A ) == 1 and len(_A ) == 2: _lowerCAmelCase : Union[str, Any] = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xC2A1 and c <= 0xC2BF) or (c >= 0xC780 and c <= 0xC783) or (c >= 0xCAB9 and c <= 0xCBBF) or (c >= 0xCC80 and c <= 0xCDA2) ): return True return False 
def checkuae(_A ): _lowerCAmelCase : Optional[Any] = x.encode() if len(_A ) == 1 and len(_A ) == 3: _lowerCAmelCase : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xE2_8080 and c <= 0xE2_B07F: return True return False _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : int = [] while pos < len(_A ): _lowerCAmelCase : Tuple = min(len(_A ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 _lowerCAmelCase : int = [] # (token_id, token, pos) for e in range(_A ,_A ,-1 ): _lowerCAmelCase : Optional[int] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(_A ) > 2: _lowerCAmelCase : Optional[int] = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(_A ) > 0: # the smallest token_id is adopted _lowerCAmelCase : Union[str, Any] = sorted(_A ,key=lambda _A : x[0] )[0] result.append(_A ) _lowerCAmelCase : List[str] = e else: _lowerCAmelCase : Dict = pos + 1 _lowerCAmelCase : Tuple = text[pos:end] if check_simbol(_A ): result.append('<KIGOU>' ) elif checkuae(_A ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) _lowerCAmelCase : str = end return result def __lowerCamelCase ( self ,_A ,_A="\n" ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : Optional[int] = [] _lowerCAmelCase : List[str] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(_A ) > 0: words.append(bytearray(_A ).decode('utf-8' ,errors='replace' ) ) _lowerCAmelCase : int = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(_A ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(_A ) if len(_A ) > 0: words.append(bytearray(_A ).decode('utf-8' ,errors='replace' ) ) _lowerCAmelCase : Tuple = ''.join(_A ) return text
710
"""simple docstring""" from __future__ import annotations def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] ) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): '''simple docstring''' if index == len(_lowerCamelCase ): print(_lowerCamelCase ) return for i in range(len(_lowerCamelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) _lowerCAmelCase : List[str] = True create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase ) current_sequence.pop() _lowerCAmelCase : int = False _lowerCAmelCase = [3, 1, 2, 4] generate_all_permutations(sequence) _lowerCAmelCase = ["A", "B", "C"] generate_all_permutations(sequence_a)
16
0
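# Aside: a small check on the backtracking generator above, assuming the repaired
# names from that snippet. For n distinct items it prints all n! permutations;
# capturing stdout here is our own test harness, not part of the original file:
import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    generate_all_permutations([1, 2, 3])
lines = [line for line in buf.getvalue().splitlines() if line]
assert len(lines) == 6  # 3! permutations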
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) a :Dict = { "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Tuple = ["ConvNextFeatureExtractor"] a :Any = ["ConvNextImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Tuple = [ "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvNextForImageClassification", "ConvNextModel", "ConvNextPreTrainedModel", "ConvNextBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Optional[int] = [ "TFConvNextForImageClassification", "TFConvNextModel", "TFConvNextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys a :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
680
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
491
0
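# Aside: worked values for the persistence functions above, assuming the repaired names.
# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4 takes three multiplicative steps;
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1 takes three additive steps:
assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3
assert multiplicative_persistence(7) == 0  # single digits need no steps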
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
713
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''spm_char.model'''} UpperCamelCase_ = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } UpperCamelCase_ = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Tuple = VOCAB_FILES_NAMES A__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP A__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict="<s>" ,lowerCamelCase_: str="</s>" ,lowerCamelCase_: Tuple="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Dict[str, Any]] = None ,**lowerCamelCase_: List[str] ,) -> None: UpperCAmelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = vocab_file UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase_ ) @property def A__ ( self: List[str] ) -> List[Any]: return self.sp_model.get_piece_size() def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self: Union[str, Any] ) -> int: UpperCAmelCase_ : Dict = self.__dict__.copy() UpperCAmelCase_ : Union[str, Any] = None return state def __setstate__( self: List[str] ,lowerCamelCase_: Any ) -> Any: UpperCAmelCase_ : str = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> List[str]: return self.sp_model.encode(lowerCamelCase_ ,out_type=lowerCamelCase_ ) def A__ ( self: str ,lowerCamelCase_: List[str] ) -> Optional[Any]: return self.sp_model.piece_to_id(lowerCamelCase_ ) def A__ ( self: Tuple ,lowerCamelCase_: int ) -> Optional[int]: UpperCAmelCase_ : int = self.sp_model.IdToPiece(lowerCamelCase_ ) return token def A__ ( self: Any ,lowerCamelCase_: Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : Any = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCamelCase_ ) + token UpperCAmelCase_ : Any = [] else: current_sub_tokens.append(lowerCamelCase_ ) out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict=None ) -> List[int]: if token_ids_a is None: return token_ids_a + 
[self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def A__ ( self: int ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = [1] if token_ids_a is None: return ([0] * len(lowerCamelCase_ )) + suffix_ones return ([0] * len(lowerCamelCase_ )) + ([0] * len(lowerCamelCase_ )) + suffix_ones def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Optional[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ ,"""wb""" ) as fi: UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
322
0
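A minimal usage sketch for the character-level SpeechT5 tokenizer above. It assumes the upstream `microsoft/speecht5_asr` checkpoint is reachable from the Hub and that the class is exported by `transformers` (both true upstream); treat it as illustrative rather than part of the file.

# Illustrative only: encode text to character-level ids (with a trailing </s>) and round-trip it back.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
encoded = tokenizer("hello world")
print(encoded["input_ids"])                     # one id per character, plus eos
print(tokenizer.decode(encoded["input_ids"]))   # recovers "hello world</s>"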
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
54
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
481
0
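A short sketch of driving the Spearman metric above through the legacy `datasets.load_metric` entry point (newer `datasets` releases route this through `evaluate`, so treat the loading call as illustrative of the old API):

# Illustrative only: compute the coefficient and p-value on toy data.
import datasets

spearman = datasets.load_metric("spearmanr")
out = spearman.compute(
    references=[1, 2, 3, 4, 5],
    predictions=[10, 9, 2.5, 6, 4],
    return_pvalue=True,
)
print(out["spearmanr"], out["spearmanr_pvalue"])  # -0.7 and its p-value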
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (plus convs/batchnorms, which never have submodules)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
493
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
493
1
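To make the RSA arithmetic in the key generator above concrete, here is a self-contained sketch with tiny textbook primes standing in for the `rabinMiller` output (it relies only on the standard library; `pow(e, -1, phi)` needs Python 3.8+ and plays the same role as `find_mod_inverse`):

# Toy RSA round trip: keys from p=61, q=53, then encrypt/decrypt one integer.
p, q = 61, 53
n = p * q
phi = (p - 1) * (q - 1)
e = 17                      # public exponent, coprime with phi
d = pow(e, -1, phi)         # private exponent: modular inverse of e mod phi

message = 42
ciphertext = pow(message, e, n)          # encryption: m^e mod n
assert pow(ciphertext, d, n) == message  # decryption: c^d mod n recovers m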
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
75
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
394
0
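A small sketch of how the `parse_flag_from_env` helper above behaves, assuming the function is already in scope (the module itself uses package-relative imports, so it is not runnable standalone):

# Illustrative only; SOME_FLAG and UNSET_FLAG are hypothetical variable names.
import os

os.environ["SOME_FLAG"] = "yes"
assert parse_flag_from_env("SOME_FLAG", default=False)        # strtobool("yes") -> truthy
assert not parse_flag_from_env("UNSET_FLAG", default=False)   # missing key falls back to default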
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
700
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
0
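A short illustration of what the `_LazyModule` pattern in the GPT-Neo `__init__` above provides: the heavy torch/flax submodules are only imported when one of their names is first accessed. This assumes a working `transformers` install; it is a sketch, not part of the file.

# Illustrative only: attribute access triggers the deferred submodule import.
import transformers

config = transformers.GPTNeoConfig()  # first access resolves configuration_gpt_neo lazily
print(config.model_type)              # "gpt_neo"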