Dataset schema — each record below carries five fields, in this order:

  code                     string   (82 to 53.2k characters)
  code_codestyle           int64    (0 to 721)
  style_context            string   (91 to 41.9k characters)
  style_context_codestyle  int64    (0 to 699)
  label                    int64    (0 or 1)
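The records are reproduced below with their numeric fields labeled. As a minimal sketch of how such a dataset could be inspected with the Hugging Face `datasets` library — the repository id "user/code-style-pairs" is a placeholder, not the real path:

# Minimal sketch, assuming these records live in a Hugging Face dataset.
# The repository id below is a placeholder; substitute the real dataset path.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
# In the records below, label appears to be 1 exactly when code_codestyle
# equals style_context_codestyle, i.e. when both snippets share a code style.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])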
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) SCREAMING_SNAKE_CASE = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : bool , UpperCAmelCase : str = None , UpperCAmelCase : list = None ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] =None lowercase : Any =os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowercase : List[str] =os.path.abspath('''examples''' ) for item in os.listdir(UpperCAmelCase ): if item not in EXCLUDE_EXAMPLES: lowercase : Union[str, Any] =os.path.join(UpperCAmelCase , UpperCAmelCase ) if os.path.isfile(UpperCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=UpperCAmelCase , feature_script=UpperCAmelCase , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowercase : List[Any] =compare_against_test( os.path.join(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) lowercase : Optional[int] ='''\n'''.join(UpperCAmelCase ) if special_strings is not None: for string in special_strings: lowercase : Optional[Any] =diff.replace(UpperCAmelCase , '''''' ) self.assertEqual(UpperCAmelCase , '''''' ) def A__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' self.one_complete_example('''complete_nlp_example.py''' , UpperCAmelCase ) self.one_complete_example('''complete_nlp_example.py''' , UpperCAmelCase ) def A__ ( self : str ) -> Any: '''simple docstring''' lowercase : str =os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowercase : str =[ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.one_complete_example('''complete_cv_example.py''' , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = False @classmethod def A__ ( cls : Optional[Any] ) -> List[Any]: '''simple docstring''' super().setUpClass() lowercase : List[str] =tempfile.mkdtemp() lowercase : Union[str, Any] =os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase : Any =['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def A__ ( cls : Optional[Any] ) -> Tuple: 
'''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def A__ ( self : List[str] ) -> Tuple: '''simple docstring''' lowercase : Dict =f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def A__ ( self : int ) -> Union[str, Any]: '''simple docstring''' lowercase : Dict =f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split() lowercase : str =run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def A__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' lowercase : Optional[int] =f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split() lowercase : Any =run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase ) self.assertNotIn('''epoch 0:''' , UpperCAmelCase ) self.assertIn('''epoch 1:''' , UpperCAmelCase ) def A__ ( self : str ) -> List[Any]: '''simple docstring''' lowercase : List[str] =f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split() lowercase : Tuple =run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase ) if torch.cuda.is_available(): lowercase : List[Any] =torch.cuda.device_count() else: lowercase : Dict =1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , UpperCAmelCase ) self.assertIn('''epoch 1:''' , UpperCAmelCase ) else: self.assertIn('''epoch 0:''' , UpperCAmelCase ) self.assertIn('''epoch 1:''' , UpperCAmelCase ) @slow def A__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' lowercase : List[Any] =''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowercase : List[str] =run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase ) lowercase : List[Any] =re.findall('''({.+})''' , UpperCAmelCase ) lowercase : Tuple =[r for r in results if '''accuracy''' in r][-1] lowercase : Union[str, Any] =ast.literal_eval(UpperCAmelCase ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def A__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' lowercase : Any =['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def A__ ( self : int ) -> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: lowercase : Tuple =f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase , '''tracking''' ) ) ) def A__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' lowercase : Optional[Any] =['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def A__ ( self : Dict ) -> Tuple: '''simple docstring''' lowercase : List[str] =['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
code_codestyle: 94
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax all edges out of v in one search direction; return the best
    forward-cost + backward-cost meeting distance seen so far."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the neighbor was already settled by the opposite search,
        # the two frontiers meet here; try to improve the best known path.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra: run one search from the source and one from the
    destination, stopping once the frontiers' costs cross the best meeting cost."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Termination: once the cheapest unexplored pair cannot beat the best
        # meeting point, the answer is settled.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 94
label: 1
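A quick usage check of the bidirectional Dijkstra sample above — a minimal sketch using the graphs defined in that sample and the names from the cleaned-up version:

# Minimal usage sketch for the bidirectional Dijkstra sample above.
# In graph_fwd, the cheapest E -> F route is E -> G -> F (2 + 1 = 3),
# beating E -> B -> C -> D -> F (1 + 1 + 1 + 1 = 4).
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3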
"""simple docstring""" import re def lowerCamelCase_ ( UpperCAmelCase_ ) ->bool: """simple docstring""" __UpperCAmelCase : str = re.compile( R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' ) return bool(re.search(_lowerCamelCase , _lowerCamelCase ) ) if __name__ == "__main__": lowercase__ :List[str] = '0094702343221' print(is_sri_lankan_phone_number(phone))
code_codestyle: 706
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar lowercase__ :int = TypeVar('T') class snake_case ( Generic[T] ): '''simple docstring''' def __init__( self : Optional[Any] , __lowercase : list[T] , __lowercase : Callable[[T, T], T] ): '''simple docstring''' __UpperCAmelCase : Any | T = None __UpperCAmelCase : int = len(__lowercase ) __UpperCAmelCase : list[T] = [any_type for _ in range(self.N )] + arr __UpperCAmelCase : List[Any] = fnc self.build() def A_ ( self : str ): '''simple docstring''' for p in range(self.N - 1 , 0 , -1 ): __UpperCAmelCase : Any = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def A_ ( self : Union[str, Any] , __lowercase : int , __lowercase : T ): '''simple docstring''' p += self.N __UpperCAmelCase : Tuple = v while p > 1: __UpperCAmelCase : Union[str, Any] = p // 2 __UpperCAmelCase : Any = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def A_ ( self : str , __lowercase : int , __lowercase : int ): # noqa: E741 '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = l + self.N, r + self.N __UpperCAmelCase : T | None = None while l <= r: if l % 2 == 1: __UpperCAmelCase : Any = self.st[l] if res is None else self.fn(__lowercase , self.st[l] ) if r % 2 == 0: __UpperCAmelCase : Any = self.st[r] if res is None else self.fn(__lowercase , self.st[r] ) __UpperCAmelCase , __UpperCAmelCase : Any = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce lowercase__ :Any = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2] lowercase__ :List[Any] = { 0: 7, 1: 2, 2: 6, 3: -1_4, 4: 5, 5: 4, 6: 7, 7: -1_0, 8: 9, 9: 1_0, 1_0: 1_2, 1_1: 1, } lowercase__ :List[str] = SegmentTree(test_array, min) lowercase__ :List[str] = SegmentTree(test_array, max) lowercase__ :Tuple = SegmentTree(test_array, lambda a, b: a + b) def lowerCamelCase_ ( ) ->None: """simple docstring""" for i in range(len(UpperCAmelCase_ ) ): for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ): __UpperCAmelCase : Optional[Any] = reduce(UpperCAmelCase_ , test_array[i : j + 1] ) __UpperCAmelCase : Optional[int] = reduce(UpperCAmelCase_ , test_array[i : j + 1] ) __UpperCAmelCase : Union[str, Any] = reduce(lambda UpperCAmelCase_ , UpperCAmelCase_ : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(UpperCAmelCase_ , UpperCAmelCase_ ) assert max_range == max_segment_tree.query(UpperCAmelCase_ , UpperCAmelCase_ ) assert sum_range == sum_segment_tree.query(UpperCAmelCase_ , UpperCAmelCase_ ) test_all_segments() for index, value in test_updates.items(): lowercase__ :Optional[Any] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
style_context_codestyle: 374
label: 0
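A minimal usage sketch for the iterative segment tree above (names follow the cleaned-up version of that sample):

# Build a min-tree over four values, query, update, and query again.
st = SegmentTree([5, 2, 8, 1], min)
assert st.query(0, 3) == 1   # minimum over the whole array
st.update(3, 9)              # overwrite index 3 and re-aggregate ancestors
assert st.query(2, 3) == 8   # min(8, 9)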
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : int=13 , UpperCAmelCase : Optional[int]=32 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : int=[1, 2, 1] , UpperCAmelCase : List[str]=[2, 2, 4] , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Optional[Any]=2.0 , UpperCAmelCase : Tuple=True , UpperCAmelCase : int=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : int=False , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Any=0.02 , UpperCAmelCase : Union[str, Any]=1E-5 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : int=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=10 , UpperCAmelCase : Union[str, Any]=8 , ): SCREAMING_SNAKE_CASE_ :int = parent SCREAMING_SNAKE_CASE_ :Union[str, Any] = batch_size SCREAMING_SNAKE_CASE_ :Dict = image_size SCREAMING_SNAKE_CASE_ :List[str] = patch_size SCREAMING_SNAKE_CASE_ :Optional[Any] = num_channels SCREAMING_SNAKE_CASE_ :Dict = embed_dim SCREAMING_SNAKE_CASE_ :Dict = depths SCREAMING_SNAKE_CASE_ :Dict = num_heads SCREAMING_SNAKE_CASE_ :Tuple = window_size SCREAMING_SNAKE_CASE_ :Optional[int] = mlp_ratio SCREAMING_SNAKE_CASE_ :Dict = qkv_bias SCREAMING_SNAKE_CASE_ :Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ :str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ :Tuple = drop_path_rate SCREAMING_SNAKE_CASE_ :Optional[int] = hidden_act SCREAMING_SNAKE_CASE_ :str = use_absolute_embeddings SCREAMING_SNAKE_CASE_ :List[str] = patch_norm SCREAMING_SNAKE_CASE_ :List[Any] = layer_norm_eps SCREAMING_SNAKE_CASE_ :Optional[int] = initializer_range SCREAMING_SNAKE_CASE_ :List[Any] = is_training SCREAMING_SNAKE_CASE_ :Tuple = scope SCREAMING_SNAKE_CASE_ :Any = use_labels SCREAMING_SNAKE_CASE_ :Union[str, Any] = type_sequence_label_size SCREAMING_SNAKE_CASE_ :int = encoder_stride def _snake_case ( self : Optional[Any]): SCREAMING_SNAKE_CASE_ :str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ :Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_ :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ :List[Any] = self.get_config() return config, pixel_values, labels def _snake_case ( self : Tuple): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _snake_case ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str): SCREAMING_SNAKE_CASE_ :int = SwinvaModel(config=UpperCAmelCase) model.to(UpperCAmelCase) model.eval() SCREAMING_SNAKE_CASE_ :int = model(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) SCREAMING_SNAKE_CASE_ :List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _snake_case ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple): SCREAMING_SNAKE_CASE_ :Union[str, Any] = SwinvaForMaskedImageModeling(config=UpperCAmelCase) model.to(UpperCAmelCase) model.eval() SCREAMING_SNAKE_CASE_ :Union[str, Any] = model(UpperCAmelCase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images SCREAMING_SNAKE_CASE_ :str = 1 SCREAMING_SNAKE_CASE_ :Union[str, Any] = SwinvaForMaskedImageModeling(UpperCAmelCase) model.to(UpperCAmelCase) model.eval() SCREAMING_SNAKE_CASE_ :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ :Optional[int] = model(UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _snake_case ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any): SCREAMING_SNAKE_CASE_ :Tuple = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ :List[str] = SwinvaForImageClassification(UpperCAmelCase) model.to(UpperCAmelCase) model.eval() SCREAMING_SNAKE_CASE_ :str = model(UpperCAmelCase , labels=UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _snake_case ( self : int): SCREAMING_SNAKE_CASE_ :Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE_ :List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase , lowercase , unittest.TestCase ): lowerCamelCase_ : Any = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCamelCase_ : Dict = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ : Tuple = False lowerCamelCase_ : List[Any] = False lowerCamelCase_ : Dict = False lowerCamelCase_ : Union[str, Any] = False def _snake_case ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_ :Any = SwinvaModelTester(self) SCREAMING_SNAKE_CASE_ :Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37) def _snake_case ( self : int): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : List[str]): SCREAMING_SNAKE_CASE_ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def _snake_case ( self : Union[str, Any]): pass @unittest.skip(reason="Swinv2 does not use inputs_embeds") def _snake_case ( self : Union[str, Any]): pass def _snake_case ( self : int): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :Optional[Any] = model_class(UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) SCREAMING_SNAKE_CASE_ :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear)) def _snake_case ( self : str): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :int = model_class(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Tuple = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ :Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ :Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase) def _snake_case ( self : List[str]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ :Optional[Any] = True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :str = True SCREAMING_SNAKE_CASE_ :Optional[Any] = False SCREAMING_SNAKE_CASE_ :int = True SCREAMING_SNAKE_CASE_ :Tuple = model_class(UpperCAmelCase) model.to(UpperCAmelCase) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ :Dict = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)) SCREAMING_SNAKE_CASE_ :Tuple = outputs.attentions SCREAMING_SNAKE_CASE_ :Optional[int] = len(self.model_tester.depths) self.assertEqual(len(UpperCAmelCase) , UpperCAmelCase) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ :List[Any] = True SCREAMING_SNAKE_CASE_ :Dict = config.window_size**2 SCREAMING_SNAKE_CASE_ :Optional[int] = model_class(UpperCAmelCase) model.to(UpperCAmelCase) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ :List[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)) SCREAMING_SNAKE_CASE_ :List[str] = outputs.attentions self.assertEqual(len(UpperCAmelCase) , UpperCAmelCase) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) SCREAMING_SNAKE_CASE_ :Optional[Any] = len(UpperCAmelCase) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ :int = True SCREAMING_SNAKE_CASE_ :int = True SCREAMING_SNAKE_CASE_ :Tuple = model_class(UpperCAmelCase) model.to(UpperCAmelCase) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ :List[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)) if hasattr(self.model_tester , "num_hidden_states_types"): SCREAMING_SNAKE_CASE_ :List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states SCREAMING_SNAKE_CASE_ :int = 2 
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase)) SCREAMING_SNAKE_CASE_ :Dict = outputs.attentions self.assertEqual(len(UpperCAmelCase) , UpperCAmelCase) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _snake_case ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : int): SCREAMING_SNAKE_CASE_ :Optional[int] = model_class(UpperCAmelCase) model.to(UpperCAmelCase) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ :str = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)) SCREAMING_SNAKE_CASE_ :int = outputs.hidden_states SCREAMING_SNAKE_CASE_ :List[str] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(UpperCAmelCase) , UpperCAmelCase) # Swinv2 has a different seq_length SCREAMING_SNAKE_CASE_ :Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE_ :str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) SCREAMING_SNAKE_CASE_ :str = outputs.reshaped_hidden_states self.assertEqual(len(UpperCAmelCase) , UpperCAmelCase) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Dict = reshaped_hidden_states[0].shape SCREAMING_SNAKE_CASE_ :Union[str, Any] = ( reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _snake_case ( self : Optional[int]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ :Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :Optional[Any] = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ :List[str] = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase) def _snake_case ( self : List[Any]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ :List[str] = 3 SCREAMING_SNAKE_CASE_ :str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) SCREAMING_SNAKE_CASE_ :int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE_ :str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) SCREAMING_SNAKE_CASE_ :List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :Dict = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width)) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ :List[Any] = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width)) def _snake_case ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase) def _snake_case ( self : int): SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase) @slow def _snake_case ( self : Dict): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ :Optional[int] = SwinvaModel.from_pretrained(UpperCAmelCase) self.assertIsNotNone(UpperCAmelCase) def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ :Optional[Any] = _config_zero_init(UpperCAmelCase) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :List[str] = model_class(config=UpperCAmelCase) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class _UpperCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self : Any): return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") if is_vision_available() else None ) @slow def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ :Optional[int] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to( UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Tuple = self.default_image_processor SCREAMING_SNAKE_CASE_ :Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") SCREAMING_SNAKE_CASE_ :Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="pt").to(UpperCAmelCase) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ :Tuple = model(**UpperCAmelCase) # verify the logits SCREAMING_SNAKE_CASE_ :int = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026]).to(UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4))
code_codestyle: 631
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowercase ( a , a ): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer SCREAMING_SNAKE_CASE_ :List[Any] = flax_key_tuple[:-1] + ("weight",) SCREAMING_SNAKE_CASE_ :Any = torch.permute(a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(a ): # linear layer SCREAMING_SNAKE_CASE_ :str = flax_key_tuple[:-1] + ("weight",) SCREAMING_SNAKE_CASE_ :Optional[int] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: SCREAMING_SNAKE_CASE_ :Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def lowercase ( a , a , a ): '''simple docstring''' if "metadata" in layer: SCREAMING_SNAKE_CASE_ :Dict = layer.split("metadata" ) SCREAMING_SNAKE_CASE_ :Optional[int] = "".join(split_layer[0] )[:-1] SCREAMING_SNAKE_CASE_ :str = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: SCREAMING_SNAKE_CASE_ :str = layer.split("kvstore" ) SCREAMING_SNAKE_CASE_ :str = "".join(split_layer[0] )[:-1] SCREAMING_SNAKE_CASE_ :str = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: SCREAMING_SNAKE_CASE_ :Union[str, Any] = layer.split("/" ) SCREAMING_SNAKE_CASE_ :Optional[int] = "/".join(split_layer[:-1] ) SCREAMING_SNAKE_CASE_ :int = (split_layer[-1],) if "kvstore/path" in layer: SCREAMING_SNAKE_CASE_ :Union[str, Any] = F"{switch_checkpoint_path}/{checkpoint_info[layer]}" elif "kvstore/driver" in layer: SCREAMING_SNAKE_CASE_ :Tuple = "file" else: SCREAMING_SNAKE_CASE_ :str = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowercase ( a , a ): '''simple docstring''' SCREAMING_SNAKE_CASE_ :int = rename_keys(a ) SCREAMING_SNAKE_CASE_ :Union[str, Any] = {} for k, v in current_block.items(): SCREAMING_SNAKE_CASE_ :List[str] = v SCREAMING_SNAKE_CASE_ :Optional[Any] = new_current_block torch.save(a , a ) def lowercase ( a , a , a , a , a = WEIGHTS_NAME ): '''simple docstring''' SCREAMING_SNAKE_CASE_ :Optional[int] = convert_file_size_to_int(a ) SCREAMING_SNAKE_CASE_ :int = [] SCREAMING_SNAKE_CASE_ :str = {} SCREAMING_SNAKE_CASE_ :List[str] = 0 SCREAMING_SNAKE_CASE_ :Optional[int] = 0 os.makedirs(a , exist_ok=a ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: SCREAMING_SNAKE_CASE_ :int = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] SCREAMING_SNAKE_CASE_ :Any = flatten_dict(a , sep="/" ) SCREAMING_SNAKE_CASE_ :Optional[Any] = {} for layer in checkpoint_info.keys(): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[Any] = get_key_and_tensorstore_dict( a , a , a ) if curr_real_layer_name in all_layers: SCREAMING_SNAKE_CASE_ :str = content else: SCREAMING_SNAKE_CASE_ :Optional[Any] = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file SCREAMING_SNAKE_CASE_ :Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() SCREAMING_SNAKE_CASE_ :List[Any] = torch.tensor(a ) SCREAMING_SNAKE_CASE_ :str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # 
use the renaming pattern from the small conversion scripts SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , a ) SCREAMING_SNAKE_CASE_ :Any = "/".join(a ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: SCREAMING_SNAKE_CASE_ :str = os.path.join( a , weights_name.replace(".bin" , F"-{len(a )+1:05d}-of-???.bin" ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) del current_block SCREAMING_SNAKE_CASE_ :Tuple = {} SCREAMING_SNAKE_CASE_ :Dict = 0 SCREAMING_SNAKE_CASE_ :Optional[int] = raw_weights.to(getattr(a , a ) ) current_block_size += weight_size total_size += weight_size # Add the last block SCREAMING_SNAKE_CASE_ :Dict = os.path.join(a , weights_name.replace(".bin" , F"-{len(a )+1:05d}-of-???.bin" ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index SCREAMING_SNAKE_CASE_ :Optional[int] = {} SCREAMING_SNAKE_CASE_ :int = {} for idx, shard in enumerate(a ): SCREAMING_SNAKE_CASE_ :Optional[Any] = weights_name.replace( ".bin" , F"-{idx+1:05d}-of-{len(a ):05d}.bin" ) # len(sharded_state_dicts):05d} SCREAMING_SNAKE_CASE_ :Any = os.path.join(a , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) ) os.rename(a , os.path.join(a , a ) ) SCREAMING_SNAKE_CASE_ :List[Any] = shard for key in shard: SCREAMING_SNAKE_CASE_ :str = shard_file # Add the metadata SCREAMING_SNAKE_CASE_ :List[str] = {"total_size": total_size} SCREAMING_SNAKE_CASE_ :Optional[int] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_ :Optional[int] = json.dumps(a , indent=2 , sort_keys=a ) + "\n" f.write(a ) return metadata, index if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. 
Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) SCREAMING_SNAKE_CASE__ = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowercase ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer SCREAMING_SNAKE_CASE_ :Dict = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) SCREAMING_SNAKE_CASE_ :str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) SCREAMING_SNAKE_CASE_ :List[Any] = TaTokenizer.from_pretrained("t5-small" ) SCREAMING_SNAKE_CASE_ :Optional[int] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." SCREAMING_SNAKE_CASE_ :List[Any] = tokenizer(a , return_tensors="pt" ).input_ids SCREAMING_SNAKE_CASE_ :List[str] = model.generate(a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
style_context_codestyle: 631
label: 1
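The Swin v2 model test above checks output shapes derived from the patch grid; the same arithmetic, worked through with the tester's default hyperparameters:

# Worked example of the shape arithmetic in the Swin v2 test above, using
# the tester defaults: image_size=32, patch_size=2, depths=[1, 2, 1], embed_dim=16.
image_size, patch_size, depths, embed_dim = 32, 2, [1, 2, 1], 16
# (32 // 2) ** 2 = 256 patches, merged 4x at each of the 2 stage transitions.
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
# Channel width doubles at each stage transition: 16 * 2 ** 2 = 64.
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
assert (expected_seq_len, expected_dim) == (16, 64)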
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class snake_case ( unittest.TestCase ): def __init__( self : int , a_ : Tuple , a_ : Union[str, Any]=7 , a_ : int=3 , a_ : Optional[Any]=30 , a_ : List[Any]=400 , a_ : Tuple=True , a_ : Dict=None , a_ : Any=True , a_ : Union[str, Any]=[0.5, 0.5, 0.5] , a_ : Any=[0.5, 0.5, 0.5] , a_ : Dict=True , a_ : int=1 / 255 , a_ : Tuple=True , )-> Tuple: """simple docstring""" # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p SCREAMING_SNAKE_CASE__ : Optional[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : List[str] = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Optional[Any] = min_resolution SCREAMING_SNAKE_CASE__ : Dict = max_resolution SCREAMING_SNAKE_CASE__ : int = do_resize SCREAMING_SNAKE_CASE__ : List[str] = size SCREAMING_SNAKE_CASE__ : Tuple = do_normalize SCREAMING_SNAKE_CASE__ : Tuple = image_mean SCREAMING_SNAKE_CASE__ : Optional[int] = image_std SCREAMING_SNAKE_CASE__ : Optional[int] = do_rescale SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor SCREAMING_SNAKE_CASE__ : str = do_pad def __lowercase( self : str )-> Dict: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowercase( self : Any , a_ : Union[str, Any] , a_ : List[Any]=False )-> List[Any]: """simple docstring""" if not batched: SCREAMING_SNAKE_CASE__ : Optional[int] = image_inputs[0] if isinstance(a_ , Image.Image ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.size else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE__ : Optional[Any] = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE__ : Tuple = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE__ : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE__ : Any = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.size['shortest_edge'] SCREAMING_SNAKE_CASE__ : str = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE__ : Any = max(a_ , key=lambda a_ : item[0] )[0] SCREAMING_SNAKE_CASE__ : Tuple = max(a_ , key=lambda a_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = ConditionalDetrImageProcessor if is_vision_available() else None def __lowercase( self : Dict )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def __lowercase( self : Dict )-> 
List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase( self : str )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 'image_mean' ) ) self.assertTrue(hasattr(a_ , 'image_std' ) ) self.assertTrue(hasattr(a_ , 'do_normalize' ) ) self.assertTrue(hasattr(a_ , 'do_resize' ) ) self.assertTrue(hasattr(a_ , 'size' ) ) def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a_ ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , a_ ) def __lowercase( self : Any )-> Dict: """simple docstring""" pass def __lowercase( self : List[Any] )-> str: """simple docstring""" # Initialize image_processing SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a_ , batched=a_ ) SCREAMING_SNAKE_CASE__ : Dict = image_processing(a_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowercase( self : Tuple )-> List[str]: """simple docstring""" # Initialize image_processing SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : Any = image_processing(a_ , return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a_ , batched=a_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple 
docstring""" # Initialize image_processing SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.get_expected_values(a_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(a_ , return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a_ , batched=a_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowercase( self : List[str] )-> Tuple: """simple docstring""" # prepare image and target SCREAMING_SNAKE_CASE__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: SCREAMING_SNAKE_CASE__ : Optional[int] = json.loads(f.read() ) SCREAMING_SNAKE_CASE__ : str = {'image_id': 3_9769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE__ : Optional[Any] = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' ) SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(images=a_ , annotations=a_ , return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a_ , atol=1e-4 ) ) # verify area SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a_ ) ) # verify boxes SCREAMING_SNAKE_CASE__ : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , a_ ) SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a_ , atol=1e-3 ) ) # verify image_id SCREAMING_SNAKE_CASE__ : str = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a_ ) ) # verify is_crowd SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a_ ) ) # verify class_labels SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a_ ) ) # verify orig_size SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a_ ) ) # verify size SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a_ ) ) @slow def __lowercase( self : str )-> Tuple: """simple docstring""" # prepare image, target and masks_path SCREAMING_SNAKE_CASE__ : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: SCREAMING_SNAKE_CASE__ : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE__ : Dict = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target} SCREAMING_SNAKE_CASE__ : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE__ : List[str] = ConditionalDetrImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , a_ ) SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a_ , atol=1e-4 ) ) # verify area SCREAMING_SNAKE_CASE__ : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a_ ) ) # verify boxes SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a_ , atol=1e-3 ) ) # verify image_id SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a_ ) ) # verify is_crowd SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a_ ) ) # verify class_labels SCREAMING_SNAKE_CASE__ : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a_ ) ) # verify masks SCREAMING_SNAKE_CASE__ : Tuple = 82_2873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a_ ) # verify orig_size SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a_ ) ) # verify size SCREAMING_SNAKE_CASE__ : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a_ ) )
code_codestyle: 636
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap a model from its distributed and compiled containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Peel off autocast/precision wrappers until the original forward is found.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save on the main process only (or via xm.save on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables, removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
style_context_codestyle: 636
label: 1
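The image-processor test above computes its expected output sizes with a shortest-edge resize rule; a small worked sketch of that rule (the 640x480 input is illustrative):

# Worked sketch of the shortest-edge resize rule used in the DETR-style
# image-processor test above: scale so the short side hits size["shortest_edge"].
shortest_edge = 18
w, h = 640, 480                               # a landscape input (w > h)
expected_height = shortest_edge               # short side becomes 18
expected_width = int(shortest_edge * w / h)   # 18 * 640 / 480 = 24
assert (expected_height, expected_width) == (18, 24)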
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if number is prime (trial division up to its square root)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime among differences of consecutive cubes.

    The candidates 7, 19, 37, 61, ... are (n + 1) ** 3 - n ** 3 = 3n^2 + 3n + 1,
    generated incrementally by adding 6 * cube_index at each step.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 614
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they violate the direction
    (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge a bitonic sequence of the given length into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort the two halves in opposite directions, then bitonic-merge them."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
style_context_codestyle: 614
label: 1
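A minimal usage sketch for the bitonic sort sample above; note that this textbook variant assumes the input length is a power of two:

data = [12, 4, 9, 1, 7, 3, 10, 2]       # length 8, a power of two
bitonic_sort(data, 0, len(data), 1)      # direction 1 sorts ascending
assert data == [1, 2, 3, 4, 7, 9, 10, 12]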
"""simple docstring""" import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) A__ : Dict= logging.getLogger(__name__) def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ = np.argmax(SCREAMING_SNAKE_CASE , axis=1 ) return np.sum(outputs == labels ) def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" with open(SCREAMING_SNAKE_CASE , encoding='utf_8' ) as f: UpperCamelCase__ = csv.reader(SCREAMING_SNAKE_CASE ) UpperCamelCase__ = [] next(SCREAMING_SNAKE_CASE ) # skip the first line for line in tqdm(SCREAMING_SNAKE_CASE ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" UpperCamelCase__ = [] for dataset in encoded_datasets: UpperCamelCase__ = len(SCREAMING_SNAKE_CASE ) UpperCamelCase__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) UpperCamelCase__ = np.zeros((n_batch, 2) , dtype=np.intaa ) UpperCamelCase__ = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) UpperCamelCase__ = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(SCREAMING_SNAKE_CASE ): UpperCamelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] UpperCamelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] UpperCamelCase__ = with_conta UpperCamelCase__ = with_conta UpperCamelCase__ = len(SCREAMING_SNAKE_CASE ) - 1 UpperCamelCase__ = len(SCREAMING_SNAKE_CASE ) - 1 UpperCamelCase__ = with_conta UpperCamelCase__ = with_conta UpperCamelCase__ = mc_label UpperCamelCase__ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) ) return tensor_datasets def lowerCAmelCase_( ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE , default='' ) parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE , default='' ) parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE , default=3 ) parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE , default=8 ) parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE , default=16 ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' ) , ) parser.add_argument( '--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE , default=6.25E-5 ) parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE , default=0.01 ) parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE , default=0.9 ) parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE , default=3_74 ) parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) UpperCamelCase__ = parser.parse_args() print(SCREAMING_SNAKE_CASE ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) UpperCamelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) UpperCamelCase__ = torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset UpperCamelCase__ = ['_start_', '_delimiter_', '_classify_'] UpperCamelCase__ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(SCREAMING_SNAKE_CASE ) UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) UpperCamelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) ) model.to(SCREAMING_SNAKE_CASE ) # Load and encode the datasets def tokenize_and_encode(SCREAMING_SNAKE_CASE ): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return obj return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj] logger.info('Encoding dataset...' ) UpperCamelCase__ = load_rocstories_dataset(args.train_dataset ) UpperCamelCase__ = load_rocstories_dataset(args.eval_dataset ) UpperCamelCase__ = (train_dataset, eval_dataset) UpperCamelCase__ = tokenize_and_encode(SCREAMING_SNAKE_CASE ) # Compute the max input length for the Transformer UpperCamelCase__ = model.config.n_positions // 2 - 2 UpperCamelCase__ = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) UpperCamelCase__ = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders UpperCamelCase__ = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) UpperCamelCase__ , UpperCamelCase__ = tensor_datasets[0], tensor_datasets[1] UpperCamelCase__ = TensorDataset(*SCREAMING_SNAKE_CASE ) UpperCamelCase__ = RandomSampler(SCREAMING_SNAKE_CASE ) UpperCamelCase__ = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size ) UpperCamelCase__ = TensorDataset(*SCREAMING_SNAKE_CASE ) UpperCamelCase__ = SequentialSampler(SCREAMING_SNAKE_CASE ) UpperCamelCase__ = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: UpperCamelCase__ = args.max_steps UpperCamelCase__ = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1 else: UpperCamelCase__ = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs UpperCamelCase__ = list(model.named_parameters() ) UpperCamelCase__ = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] UpperCamelCase__ = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] UpperCamelCase__ = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon ) UpperCamelCase__ = get_linear_schedule_with_warmup( SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE ) if args.do_train: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ): UpperCamelCase__ = 0 
UpperCamelCase__ = 0 UpperCamelCase__ = tqdm(SCREAMING_SNAKE_CASE , desc='Training' ) for step, batch in enumerate(SCREAMING_SNAKE_CASE ): UpperCamelCase__ = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = batch UpperCamelCase__ = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE ) UpperCamelCase__ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() UpperCamelCase__ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 UpperCamelCase__ = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer UpperCamelCase__ = model.module if hasattr(SCREAMING_SNAKE_CASE , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` UpperCamelCase__ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE ) UpperCamelCase__ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE ) torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE ) model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned UpperCamelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) UpperCamelCase__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(SCREAMING_SNAKE_CASE ) if args.do_eval: model.eval() UpperCamelCase__ , UpperCamelCase__ = 0, 0 UpperCamelCase__ , UpperCamelCase__ = 0, 0 for batch in tqdm(SCREAMING_SNAKE_CASE , desc='Evaluating' ): UpperCamelCase__ = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = batch with torch.no_grad(): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = model( SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE ) UpperCamelCase__ = mc_logits.detach().cpu().numpy() UpperCamelCase__ = mc_labels.to('cpu' ).numpy() UpperCamelCase__ = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 UpperCamelCase__ = eval_loss / nb_eval_steps UpperCamelCase__ = eval_accuracy / nb_eval_examples UpperCamelCase__ = tr_loss / nb_tr_steps if args.do_train else None UpperCamelCase__ = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} UpperCamelCase__ = os.path.join(args.output_dir , 'eval_results.txt' ) with open(SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
709
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str]= logging.get_logger(__name__) class __lowerCamelCase ( _a ): a : Optional[int] ="""timm_backbone""" def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict: super().__init__(**snake_case_ ) UpperCamelCase__ = backbone UpperCamelCase__ = num_channels UpperCamelCase__ = features_only UpperCamelCase__ = use_pretrained_backbone UpperCamelCase__ = True UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
20
0
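An aside on the fine-tuning script in this row: `pre_process_datasets` packs each ROCStories example into two candidate sequences for the double-heads model. A small self-contained sketch of that layout follows; the token ids below are toy values invented for illustration, in the real script they come from the GPT tokenizer's special tokens.

# Layout: [start] story [delim] continuation [clf], one row per candidate.
start_token, delimiter_token, clf_token = 9001, 9002, 9003  # toy ids

story = [11, 12, 13]  # tokenized story
cont1 = [21, 22]      # candidate continuation 1
cont2 = [31, 32]      # candidate continuation 2

cand1 = [start_token] + story + [delimiter_token] + cont1 + [clf_token]
cand2 = [start_token] + story + [delimiter_token] + cont2 + [clf_token]

# mc_token_ids marks the [clf] position whose hidden state feeds the
# multiple-choice head; lm label positions filled with -100 are ignored by
# the language-modeling loss, matching the script's np.full(..., -100).
mc_token_ids = [len(cand1) - 1, len(cand2) - 1]
print(cand1)
print(cand2)
print(mc_token_ids)  # [7, 7]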
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."""
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
87
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def lowercase__ ( lowerCamelCase ): # initialize config if "resnet-50" in model_name: _SCREAMING_SNAKE_CASE : Optional[int] = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: _SCREAMING_SNAKE_CASE : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) _SCREAMING_SNAKE_CASE : Optional[Any] = DetrConfig(use_timm_backbone=lowerCamelCase, backbone_config=lowerCamelCase ) # set label attributes _SCREAMING_SNAKE_CASE : Optional[int] = 'panoptic' in model_name if is_panoptic: _SCREAMING_SNAKE_CASE : List[str] = 250 else: _SCREAMING_SNAKE_CASE : Optional[Any] = 91 _SCREAMING_SNAKE_CASE : Union[str, Any] = 'huggingface/label-files' _SCREAMING_SNAKE_CASE : Dict = 'coco-detection-id2label.json' _SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type='dataset' ), 'r' ) ) _SCREAMING_SNAKE_CASE : Dict = {int(lowerCamelCase ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE : List[Any] = idalabel _SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowercase__ ( lowerCamelCase ): # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE : Optional[int] = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", 
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") ) 
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def lowercase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = val def lowercase__ ( lowerCamelCase, lowerCamelCase=False ): _SCREAMING_SNAKE_CASE : Optional[int] = '' if is_panoptic: _SCREAMING_SNAKE_CASE : List[Any] = 'detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Any = in_proj_weight[:256, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[:256] _SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :] _SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[256:512] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-256:, :] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : int = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[:256, :] _SCREAMING_SNAKE_CASE : str = in_proj_bias[:256] _SCREAMING_SNAKE_CASE : str = in_proj_weight[256:512, :] _SCREAMING_SNAKE_CASE : int = in_proj_bias[256:512] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[-256:, :] _SCREAMING_SNAKE_CASE : List[str] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _SCREAMING_SNAKE_CASE : str = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight_cross_attn[:256, :] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias_cross_attn[:256] _SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias_cross_attn[256:512] _SCREAMING_SNAKE_CASE : int = in_proj_weight_cross_attn[-256:, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias_cross_attn[-256:] def lowercase__ ( ): _SCREAMING_SNAKE_CASE : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' _SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def lowercase__ ( lowerCamelCase, lowerCamelCase=None, lowerCamelCase=False ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = get_detr_config(lowerCamelCase ) # load original model from torch hub _SCREAMING_SNAKE_CASE : Optional[Any] = { 'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101', } logger.info(f"""Converting model {model_name}...""" ) _SCREAMING_SNAKE_CASE : int = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=lowerCamelCase ).eval() _SCREAMING_SNAKE_CASE : Optional[Any] = detr.state_dict() # rename keys for src, dest in create_rename_keys(lowerCamelCase ): if is_panoptic: _SCREAMING_SNAKE_CASE : int = 'detr.' 
+ src rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase, is_panoptic=lowerCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _SCREAMING_SNAKE_CASE : str = 'detr.model.' if is_panoptic else 'model.' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): _SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: _SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): _SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = val # finally, create HuggingFace model and load state dict _SCREAMING_SNAKE_CASE : Dict = DetrForSegmentation(lowerCamelCase ) if is_panoptic else DetrForObjectDetection(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() # verify our conversion on an image _SCREAMING_SNAKE_CASE : Optional[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' _SCREAMING_SNAKE_CASE : int = DetrImageProcessor(format=lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = processor(images=prepare_img(), return_tensors='pt' ) _SCREAMING_SNAKE_CASE : Tuple = encoding['pixel_values'] _SCREAMING_SNAKE_CASE : List[str] = detr(lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = model(lowerCamelCase ) assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1E-3 ) assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(f"""nielsr/{model_name}""" ) processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') lowerCAmelCase__ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
621
0
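An aside on the DETR conversion script in this row: `read_in_q_k_v` slices PyTorch's fused `in_proj_weight`/`in_proj_bias` into separate query, key, and value projections. The same slicing on random tensors, as a standalone sketch (256 is DETR's hidden size; the variable names are illustrative):

import torch

hidden_size = 256  # DETR's d_model
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused q/k/v, as in nn.MultiheadAttention
in_proj_bias = torch.randn(3 * hidden_size)

# Query, key, value are stacked along dim 0, in that order.
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)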
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a : List[str] = logging.get_logger(__name__) class a_ ( _UpperCAmelCase ): a : int = ['pixel_values'] def __init__( self : List[Any] , __UpperCamelCase : bool = True , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[Any]=PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , **__UpperCamelCase : Any , ) ->None: '''simple docstring''' _UpperCAmelCase = do_resize _UpperCAmelCase = do_rescale _UpperCAmelCase = size_divisor _UpperCAmelCase = resample super().__init__(**__UpperCamelCase ) def _snake_case ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[ChannelDimension] = None , **__UpperCamelCase : Any ) ->np.ndarray: '''simple docstring''' _UpperCAmelCase ,_UpperCAmelCase = get_image_size(__UpperCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor _UpperCAmelCase = height // size_divisor * size_divisor _UpperCAmelCase = width // size_divisor * size_divisor _UpperCAmelCase = resize(__UpperCamelCase , (new_h, new_w) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) return image def _snake_case ( self : str , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[ChannelDimension] = None , **__UpperCamelCase : Optional[Any] ) ->np.ndarray: '''simple docstring''' return rescale(image=__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def _snake_case ( self : int , __UpperCamelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[TensorType, str]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : Optional[int] , ) ->BatchFeature: '''simple docstring''' _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = size_divisor if size_divisor is not None else self.size_divisor _UpperCAmelCase = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("""size_divisor is required for resizing""" ) _UpperCAmelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError("""Invalid image(s)""" ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for img in images] if do_resize: _UpperCAmelCase = [self.resize(__UpperCamelCase , size_divisor=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(__UpperCamelCase , scale=1 / 2_55 ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCAmelCase = {"""pixel_values""": images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
19
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Tuple = { '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[str] = [ '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PegasusXForConditionalGeneration''', '''PegasusXModel''', '''PegasusXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
19
1
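An aside on the image-processor row above: its `resize` step rounds height and width down to the nearest multiple of `size_divisor` before resampling. The integer arithmetic in isolation (the function name is invented here):

def round_down_to_multiple(x: int, divisor: int) -> int:
    # Same computation as `height // size_divisor * size_divisor` in the processor.
    return x // divisor * divisor

# With size_divisor=32, a 643x480 image would be resized to 640x480.
print(round_down_to_multiple(643, 32), round_down_to_multiple(480, 32))  # 640 480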
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : torch.FloatTensor lowerCamelCase : Optional[torch.FloatTensor] = None def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=0.999 , UpperCAmelCase_ : Tuple="cosine" , ) -> Optional[Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase_ : Tuple ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) __lowerCamelCase : List[str] = [] for i in range(UpperCAmelCase_ ): __lowerCamelCase : int = i / num_diffusion_timesteps __lowerCamelCase : List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase_ ) / alpha_bar_fn(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) return torch.tensor(UpperCAmelCase_ , dtype=torch.floataa ) class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = "fixed_small_log" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = "squaredcos_cap_v2" , ) -> List[str]: if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) __lowerCamelCase : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = 1.0 - self.betas __lowerCamelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) __lowerCamelCase : Optional[Any] = torch.tensor(1.0 ) # standard deviation of the initial noise distribution __lowerCamelCase : Optional[Any] = 1.0 # setable values __lowerCamelCase : Optional[Any] = None __lowerCamelCase : Optional[int] = torch.from_numpy(np.arange(0 , SCREAMING_SNAKE_CASE_ )[::-1].copy() ) __lowerCamelCase : Optional[int] = variance_type def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> torch.FloatTensor: return sample def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Optional[int]: __lowerCamelCase : Optional[int] = num_inference_steps __lowerCamelCase : Any = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) __lowerCamelCase : str = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) __lowerCamelCase : List[str] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> Dict: if prev_timestep is None: __lowerCamelCase : Dict = t - 1 __lowerCamelCase : int = self.alphas_cumprod[t] __lowerCamelCase : str = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __lowerCamelCase : Optional[int] = 1 - alpha_prod_t __lowerCamelCase : Dict = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __lowerCamelCase : List[str] = self.betas[t] else: __lowerCamelCase : 
List[str] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __lowerCamelCase : List[str] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: __lowerCamelCase : str = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": __lowerCamelCase : Optional[Any] = torch.log(torch.clamp(SCREAMING_SNAKE_CASE_ , min=1E-20 ) ) __lowerCamelCase : List[Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler __lowerCamelCase : str = variance.log() __lowerCamelCase : Dict = beta.log() __lowerCamelCase : List[str] = (predicted_variance + 1) / 2 __lowerCamelCase : str = frac * max_log + (1 - frac) * min_log return variance def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]: __lowerCamelCase : List[str] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": __lowerCamelCase , __lowerCamelCase : List[Any] = torch.split(SCREAMING_SNAKE_CASE_ , sample.shape[1] , dim=1 ) else: __lowerCamelCase : Optional[int] = None # 1. compute alphas, betas if prev_timestep is None: __lowerCamelCase : str = t - 1 __lowerCamelCase : int = self.alphas_cumprod[t] __lowerCamelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __lowerCamelCase : int = 1 - alpha_prod_t __lowerCamelCase : List[Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __lowerCamelCase : Dict = self.betas[t] __lowerCamelCase : List[Any] = self.alphas[t] else: __lowerCamelCase : Optional[Any] = 1 - alpha_prod_t / alpha_prod_t_prev __lowerCamelCase : Dict = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __lowerCamelCase : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __lowerCamelCase : Optional[Any] = model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`' ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: __lowerCamelCase : Dict = torch.clamp( SCREAMING_SNAKE_CASE_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __lowerCamelCase : Any = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t __lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __lowerCamelCase : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise __lowerCamelCase : Dict = 0 if t > 0: __lowerCamelCase : Union[str, Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=SCREAMING_SNAKE_CASE_ , device=model_output.device ) __lowerCamelCase : Union[str, Any] = self._get_variance( SCREAMING_SNAKE_CASE_ , predicted_variance=SCREAMING_SNAKE_CASE_ , prev_timestep=SCREAMING_SNAKE_CASE_ , ) if self.variance_type == "fixed_small_log": __lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": __lowerCamelCase : Tuple = (0.5 * variance).exp() else: raise ValueError( f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`' ' for the UnCLIPScheduler.' ) __lowerCamelCase : Tuple = variance * variance_noise __lowerCamelCase : List[str] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples __lowerCamelCase : str = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) __lowerCamelCase : Any = timesteps.to(original_samples.device ) __lowerCamelCase : Tuple = alphas_cumprod[timesteps] ** 0.5 __lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): __lowerCamelCase : Union[str, Any] = sqrt_alpha_prod.unsqueeze(-1 ) __lowerCamelCase : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 __lowerCamelCase : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): __lowerCamelCase : str = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) __lowerCamelCase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
13
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : List[str] = logging.get_logger(__name__) UpperCamelCase__ : str = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class _lowercase ( lowerCAmelCase ): '''simple docstring''' UpperCAmelCase_ : Dict = '''camembert''' def __init__( self ,lowerCamelCase_=30522 ,lowerCamelCase_=768 ,lowerCamelCase_=12 ,lowerCamelCase_=12 ,lowerCamelCase_=3072 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=512 ,lowerCamelCase_=2 ,lowerCamelCase_=0.02 ,lowerCamelCase_=1e-12 ,lowerCamelCase_=1 ,lowerCamelCase_=0 ,lowerCamelCase_=2 ,lowerCamelCase_="absolute" ,lowerCamelCase_=True ,lowerCamelCase_=None ,**lowerCamelCase_ ,) -> str: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Tuple = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = type_vocab_size UpperCAmelCase__ : int = initializer_range UpperCAmelCase__ : Optional[Any] = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : str = use_cache UpperCAmelCase__ : List[Any] = classifier_dropout class _lowercase ( lowerCAmelCase ): '''simple docstring''' @property def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase__ : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
614
0
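An aside on the scheduler row above: `betas_for_alpha_bar` discretizes the cosine noise schedule ("squaredcos_cap_v2") by taking ratios of the cumulative alpha function. A self-contained sketch of the same computation in plain Python, with the row's max_beta=0.999 cap:

import math

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list[float]:
    # alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, as in the scheduler.
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        # beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), capped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

print([round(b, 4) for b in cosine_betas(5)])  # small betas that grow over the schedule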
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : List[Any] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class __magic_name__ ( A__ ): lowercase : Tuple ='''umt5''' lowercase : Any =['''past_key_values'''] def __init__( self : Dict , UpperCamelCase__ : int=25_01_12 , UpperCamelCase__ : Optional[int]=5_12 , UpperCamelCase__ : str=64 , UpperCamelCase__ : str=10_24 , UpperCamelCase__ : int=8 , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=6 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : str=1_28 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=1e-6 , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : Optional[int]="gated-gelu" , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Tuple="T5Tokenizer" , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[Any]=0 , **UpperCamelCase__ : Union[str, Any] , ) -> int: '''simple docstring''' super().__init__( is_encoder_decoder=UpperCamelCase__ , tokenizer_class=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , ) UpperCAmelCase = vocab_size UpperCAmelCase = d_model UpperCAmelCase = d_kv UpperCAmelCase = d_ff UpperCAmelCase = num_layers UpperCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase = num_heads UpperCAmelCase = relative_attention_num_buckets UpperCAmelCase = relative_attention_max_distance UpperCAmelCase = dropout_rate UpperCAmelCase = layer_norm_epsilon UpperCAmelCase = initializer_factor UpperCAmelCase = feed_forward_proj UpperCAmelCase = use_cache UpperCAmelCase = self.feed_forward_proj.split("-" ) UpperCAmelCase = act_info[-1] UpperCAmelCase = act_info[0] == "gated" if len(UpperCamelCase__ ) > 1 and act_info[0] != "gated" or len(UpperCamelCase__ ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": UpperCAmelCase = "gelu_new" @property def SCREAMING_SNAKE_CASE_ ( self : str ) -> int: '''simple docstring''' return self.d_model @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.num_heads @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.num_layers class __magic_name__ ( A__ ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def SCREAMING_SNAKE_CASE_ ( self : int ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' UpperCAmelCase = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase = "past_encoder_sequence + sequence" UpperCAmelCase = {0: "batch"} UpperCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int: '''simple docstring''' return 13 @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> float: '''simple docstring''' return 5e-4
708
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
457
0
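An aside on the UMT5 config row above: the activation is derived from `feed_forward_proj` by splitting on "-", where a "gated-" prefix enables the gated feed-forward variant and "gated-gelu" is special-cased to "gelu_new". The parsing in isolation (the helper name is invented here):

def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
        raise ValueError(f"{feed_forward_proj} is not a valid activation function")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # special case, as in the config above
    return dense_act_fn, is_gated_act

print(parse_feed_forward_proj("gated-gelu"))  # ('gelu_new', True)
print(parse_feed_forward_proj("relu"))        # ('relu', False)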
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
59
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
662
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = { 'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _snake_case ( lowerCamelCase__ ): _A = 'wavlm' def __init__( self ,UpperCamelCase=32 ,UpperCamelCase=768 ,UpperCamelCase=12 ,UpperCamelCase=12 ,UpperCamelCase=3_072 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.0 ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-5 ,UpperCamelCase="group" ,UpperCamelCase="gelu" ,UpperCamelCase=(512, 512, 512, 512, 512, 512, 512) ,UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) ,UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) ,UpperCamelCase=False ,UpperCamelCase=128 ,UpperCamelCase=16 ,UpperCamelCase=320 ,UpperCamelCase=800 ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase=0.05 ,UpperCamelCase=10 ,UpperCamelCase=2 ,UpperCamelCase=0.0 ,UpperCamelCase=10 ,UpperCamelCase=320 ,UpperCamelCase=2 ,UpperCamelCase=0.1 ,UpperCamelCase=100 ,UpperCamelCase=256 ,UpperCamelCase=256 ,UpperCamelCase=0.1 ,UpperCamelCase="mean" ,UpperCamelCase=False ,UpperCamelCase=False ,UpperCamelCase=256 ,UpperCamelCase=(512, 512, 512, 512, 1_500) ,UpperCamelCase=(5, 3, 3, 1, 1) ,UpperCamelCase=(1, 2, 3, 1, 1) ,UpperCamelCase=512 ,UpperCamelCase=80 ,UpperCamelCase=0 ,UpperCamelCase=1 ,UpperCamelCase=2 ,UpperCamelCase=False ,UpperCamelCase=3 ,UpperCamelCase=2 ,UpperCamelCase=3 ,UpperCamelCase=None ,**UpperCamelCase ,) -> Tuple: super().__init__(**__lowerCamelCase ,pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ) snake_case__ :Tuple = hidden_size snake_case__ :Dict = feat_extract_norm snake_case__ :List[Any] = feat_extract_activation snake_case__ :Union[str, Any] = list(__lowerCamelCase ) snake_case__ :Union[str, Any] = list(__lowerCamelCase ) snake_case__ :Tuple = list(__lowerCamelCase ) snake_case__ :int = conv_bias snake_case__ :Optional[int] = num_buckets snake_case__ :Dict = max_bucket_distance snake_case__ :Optional[Any] = num_conv_pos_embeddings snake_case__ :Dict = num_conv_pos_embedding_groups snake_case__ :Union[str, Any] = len(self.conv_dim ) snake_case__ :List[str] = num_hidden_layers snake_case__ :Union[str, Any] = intermediate_size snake_case__ :Optional[Any] = hidden_act snake_case__ :List[Any] = num_attention_heads snake_case__ :Tuple = hidden_dropout snake_case__ :List[Any] = attention_dropout snake_case__ :List[str] = activation_dropout snake_case__ :Any = feat_proj_dropout snake_case__ :int = final_dropout snake_case__ :Optional[Any] = layerdrop snake_case__ :Dict = layer_norm_eps snake_case__ :Optional[int] = initializer_range snake_case__ :int = num_ctc_classes snake_case__ :Tuple = vocab_size snake_case__ :Any = do_stable_layer_norm snake_case__ :List[str] = use_weighted_layer_sum snake_case__ :Any = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case__ :Union[str, Any] = apply_spec_augment snake_case__ :List[Any] = mask_time_prob snake_case__ :int = mask_time_length snake_case__ :Union[str, Any] = mask_time_min_masks snake_case__ :List[Any] = mask_feature_prob snake_case__ :List[Any] = mask_feature_length # parameters for pretraining with codevector quantized representations snake_case__ :List[str] = num_codevectors_per_group snake_case__ :str = num_codevector_groups snake_case__ :Dict = contrastive_logits_temperature snake_case__ :Any = num_negatives snake_case__ :List[str] = codevector_dim snake_case__ :str = proj_codevector_dim snake_case__ :Union[str, Any] = diversity_loss_weight # ctc loss snake_case__ :Any = ctc_loss_reduction snake_case__ :Dict = ctc_zero_infinity # adapter snake_case__ :int = add_adapter snake_case__ :Dict = adapter_kernel_size snake_case__ :Dict = adapter_stride snake_case__ :Dict = num_adapter_layers snake_case__ :str = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. snake_case__ :Tuple = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. snake_case__ :Union[str, Any] = list(__lowerCamelCase ) snake_case__ :int = list(__lowerCamelCase ) snake_case__ :List[Any] = list(__lowerCamelCase ) snake_case__ :int = xvector_output_dim @property def lowerCAmelCase_ ( self ) -> List[str]: return functools.reduce(operator.mul ,self.conv_stride ,1 )
701
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
57
0
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES snake_case__ : int = logging.get_logger(__name__) snake_case__ : List[str] = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) snake_case__ : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) snake_case__ : Tuple = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) snake_case__ : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) snake_case__ : Any = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) snake_case__ : Optional[int] = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) snake_case__ : Union[str, Any] = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) snake_case__ : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) snake_case__ : str = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) snake_case__ : Dict = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) snake_case__ : Dict = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) snake_case__ : Any = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) snake_case__ : Optional[int] = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) snake_case__ : List[str] = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) snake_case__ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) snake_case__ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) snake_case__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) snake_case__ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) snake_case__ : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) snake_case__ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) snake_case__ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) snake_case__ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) snake_case__ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) snake_case__ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) snake_case__ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) snake_case__ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) snake_case__ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_MAPPING snake_case__ : Optional[int] = auto_class_update(FlaxAutoModel) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING snake_case__ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING snake_case__ : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING snake_case__ : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING snake_case__ : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = 
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING snake_case__ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING snake_case__ : List[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING snake_case__ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING snake_case__ : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING snake_case__ : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class _a ( _BaseAutoModelClass ): """simple docstring""" A_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING snake_case__ : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
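The auto classes above are thin dispatchers: `from_pretrained` reads the checkpoint's config, looks the model type up in the lazy mapping, and instantiates the matching Flax architecture. A minimal usage sketch, assuming flax/jax are installed and the checkpoint name (illustrative) ships Flax weights:

# Dispatch through the mapping defined above: "bert" -> FlaxBertModel.
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")

inputs = tokenizer("Auto classes pick the right architecture.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)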
23
from __future__ import annotations __A : str = list[tuple[int, int]] __A : Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __A : List[str] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , __lowerCamelCase : Node | None , ): SCREAMING_SNAKE_CASE = pos_x SCREAMING_SNAKE_CASE = pos_y SCREAMING_SNAKE_CASE = (pos_y, pos_x) SCREAMING_SNAKE_CASE = goal_x SCREAMING_SNAKE_CASE = goal_y SCREAMING_SNAKE_CASE = g_cost SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = self.calculate_heuristic() def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = abs(self.pos_x - self.goal_x ) SCREAMING_SNAKE_CASE = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : Union[str, Any] , __lowerCamelCase : List[Any] ): return self.f_cost < other.f_cost class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : tuple[int, int] , __lowerCamelCase : tuple[int, int] ): SCREAMING_SNAKE_CASE = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCamelCase ) SCREAMING_SNAKE_CASE = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , __lowerCamelCase ) SCREAMING_SNAKE_CASE = [self.start] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = False def _snake_case ( self : Optional[Any] ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: SCREAMING_SNAKE_CASE = True return self.retrace_path(__lowerCamelCase ) self.closed_nodes.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_successors(__lowerCamelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__lowerCamelCase ) else: # retrieve the best current path SCREAMING_SNAKE_CASE = self.open_nodes.pop(self.open_nodes.index(__lowerCamelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__lowerCamelCase ) else: self.open_nodes.append(__lowerCamelCase ) if not self.reached: return [self.start.pos] return None def _snake_case ( self : List[Any] , __lowerCamelCase : Node ): SCREAMING_SNAKE_CASE = [] for action in delta: SCREAMING_SNAKE_CASE = parent.pos_x + action[1] SCREAMING_SNAKE_CASE = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCamelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __lowerCamelCase , __lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCamelCase , ) ) return successors def _snake_case ( self : str , __lowerCamelCase : Node | None ): SCREAMING_SNAKE_CASE = node SCREAMING_SNAKE_CASE = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE = current_node.parent path.reverse() return path if __name__ == "__main__": __A : Optional[Any] = (0, 0) __A : Optional[int] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') __A : List[str] = GreedyBestFirst(init, goal) __A : Tuple = greedy_bf.search() if path: for pos_x, pos_y in path: __A : Optional[Any] = 2 for elem in grid: 
print(elem)
16
0
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowerCAmelCase__ ( a__ , a__=None ) ->str: '''simple docstring''' _UpperCamelCase = None if token is not None: _UpperCamelCase = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'} _UpperCamelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' _UpperCamelCase = requests.get(a__ , headers=a__ ).json() _UpperCamelCase = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) _UpperCamelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(a__ ): _UpperCamelCase = requests.get(url + f'&page={i + 2}' , headers=a__ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def lowerCAmelCase__ ( a__ , a__=None ) ->Union[str, Any]: '''simple docstring''' _UpperCamelCase = None if token is not None: _UpperCamelCase = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'} _UpperCamelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100' _UpperCamelCase = requests.get(a__ , headers=a__ ).json() _UpperCamelCase = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) _UpperCamelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(a__ ): _UpperCamelCase = requests.get(url + f'&page={i + 2}' , headers=a__ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict: '''simple docstring''' _UpperCamelCase = None if token is not None: _UpperCamelCase = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'} _UpperCamelCase = requests.get(a__ , headers=a__ , allow_redirects=a__ ) _UpperCamelCase = result.headers["Location"] _UpperCamelCase = requests.get(a__ , allow_redirects=a__ ) _UpperCamelCase = os.path.join(a__ , f'{artifact_name}.zip' ) with open(a__ , "wb" ) as fp: fp.write(response.content ) def lowerCAmelCase__ ( a__ , a__=None ) ->str: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = None with zipfile.ZipFile(a__ ) as z: for filename in z.namelist(): if not os.path.isdir(a__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(a__ ) as f: for line in f: _UpperCamelCase = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _UpperCamelCase = line[: line.index(": " )] _UpperCamelCase = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed _UpperCamelCase = line[len("FAILED " ) :] failed_tests.append(a__ ) elif filename == "job_name.txt": _UpperCamelCase = line if len(a__ ) != len(a__ ): raise ValueError( f'`errors` and `failed_tests` should have the same number of elements. Got {len(a__ )} for `errors` ' f'and {len(a__ )} for `failed_tests` instead. 
The test reports in {artifact_zip_path} have some' " problem." ) _UpperCamelCase = None if job_name and job_links: _UpperCamelCase = job_links.get(a__ , a__ ) # A list with elements of the form (line of error, error, failed test) _UpperCamelCase = [x + [y] + [job_link] for x, y in zip(a__ , a__ )] return result def lowerCAmelCase__ ( a__ , a__=None ) ->Dict: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = [os.path.join(a__ , a__ ) for p in os.listdir(a__ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(a__ , job_links=a__ ) ) return errors def lowerCAmelCase__ ( a__ , a__=None ) ->Tuple: '''simple docstring''' _UpperCamelCase = Counter() counter.update([x[1] for x in logs] ) _UpperCamelCase = counter.most_common() _UpperCamelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: _UpperCamelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} _UpperCamelCase = dict(sorted(r.items() , key=lambda a__ : item[1]["count"] , reverse=a__ ) ) return r def lowerCAmelCase__ ( a__ ) ->Optional[Any]: '''simple docstring''' _UpperCamelCase = test.split("::" )[0] if test.startswith("tests/models/" ): _UpperCamelCase = test.split("/" )[2] else: _UpperCamelCase = None return test def lowerCAmelCase__ ( a__ , a__=None ) ->int: '''simple docstring''' _UpperCamelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] _UpperCamelCase = [x for x in logs if x[2] is not None] _UpperCamelCase = {x[2] for x in logs} _UpperCamelCase = {} for test in tests: _UpperCamelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _UpperCamelCase = counter.most_common() _UpperCamelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _UpperCamelCase = sum(error_counts.values() ) if n_errors > 0: _UpperCamelCase = {"count": n_errors, "errors": error_counts} _UpperCamelCase = dict(sorted(r.items() , key=lambda a__ : item[1]["count"] , reverse=a__ ) ) return r def lowerCAmelCase__ ( a__ ) ->Dict: '''simple docstring''' _UpperCamelCase = "| no. | error | status |" _UpperCamelCase = "|-:|:-|:-|" _UpperCamelCase = [header, sep] for error in reduced_by_error: _UpperCamelCase = reduced_by_error[error]["count"] _UpperCamelCase = f'| {count} | {error[:100]} | |' lines.append(a__ ) return "\n".join(a__ ) def lowerCAmelCase__ ( a__ ) ->Tuple: '''simple docstring''' _UpperCamelCase = "| model | no. 
of errors | major error | count |" _UpperCamelCase = "|-:|-:|-:|-:|" _UpperCamelCase = [header, sep] for model in reduced_by_model: _UpperCamelCase = reduced_by_model[model]["count"] _UpperCamelCase , _UpperCamelCase = list(reduced_by_model[model]["errors"].items() )[0] _UpperCamelCase = f'| {model} | {count} | {error[:60]} | {_count} |' lines.append(a__ ) return "\n".join(a__ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') lowerCamelCase__ = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) lowerCamelCase__ = get_job_links(args.workflow_run_id, token=args.token) lowerCamelCase__ = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: lowerCamelCase__ = k.find(''' / ''') lowerCamelCase__ = k[index + len(''' / ''') :] lowerCamelCase__ = v with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) lowerCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) lowerCamelCase__ = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error lowerCamelCase__ = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors lowerCamelCase__ = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) lowerCamelCase__ = reduce_by_error(errors) lowerCamelCase__ = reduce_by_model(errors) lowerCamelCase__ = make_github_table(reduced_by_error) lowerCamelCase__ = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa) with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa)
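A toy illustration of what the reduction helpers produce, assuming the placeholder assignments above are restored so the functions work as their call sites suggest (in particular the `sorted(..., key=lambda ...)` lambdas must reference their own argument); the log entries below are fabricated:

# Each entry is [error_line, error, failed_test, job_link] as built in get_errors_from_single_artifact.
logs = [
    ["line A", "AssertionError: tensors differ", "tests/test_a.py::test_x", "https://example.com/job/1"],
    ["line B", "AssertionError: tensors differ", "tests/test_b.py::test_y", "https://example.com/job/2"],
    ["line C", "CUDA out of memory", "tests/test_c.py::test_z", "https://example.com/job/3"],
]
reduced = reduce_by_error(logs)
# -> {"AssertionError: tensors differ": {"count": 2, "failed_tests": [...]},
#    "CUDA out of memory": {"count": 1, "failed_tests": [...]}}
print(make_github_table(reduced))  # markdown table sorted by count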
82
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack = Stack()
    operator_stack: Stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the answer
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
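A second quick check of the function above. Note the design constraint: operands are pushed one character at a time via `int(i)`, so the algorithm only handles single-digit numbers, and every binary operation must be fully parenthesized:

print(dijkstras_two_stack_algorithm("(3 + (2 * 4))"))  # 11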
82
1
from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' a__ = """SpeechT5FeatureExtractor""" a__ = """SpeechT5Tokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Dict: """simple docstring""" super().__init__(a__ , a__ ) def __call__( self : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : int ) -> Optional[int]: """simple docstring""" __magic_name__ = kwargs.pop("""audio""" , a__ ) __magic_name__ = kwargs.pop("""text""" , a__ ) __magic_name__ = kwargs.pop("""text_target""" , a__ ) __magic_name__ = kwargs.pop("""audio_target""" , a__ ) __magic_name__ = kwargs.pop("""sampling_rate""" , a__ ) if audio is not None and text is not None: raise ValueError( """Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" ) if audio_target is not None and text_target is not None: raise ValueError( """Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( """You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" ) if audio is not None: __magic_name__ = self.feature_extractor(a__ , *a__ , sampling_rate=a__ , **a__ ) elif text is not None: __magic_name__ = self.tokenizer(a__ , **a__ ) else: __magic_name__ = None if audio_target is not None: __magic_name__ = self.feature_extractor(audio_target=a__ , *a__ , sampling_rate=a__ , **a__ ) __magic_name__ = targets["input_values"] elif text_target is not None: __magic_name__ = self.tokenizer(a__ , **a__ ) __magic_name__ = targets["input_ids"] else: __magic_name__ = None if inputs is None: return targets if targets is not None: __magic_name__ = labels __magic_name__ = targets.get("""attention_mask""" ) if decoder_attention_mask is not None: __magic_name__ = decoder_attention_mask return inputs def _lowercase ( self : Dict , *UpperCamelCase__ : int , **UpperCamelCase__ : Any ) -> str: """simple docstring""" __magic_name__ = kwargs.pop("""input_values""" , a__ ) __magic_name__ = kwargs.pop("""input_ids""" , a__ ) __magic_name__ = kwargs.pop("""labels""" , a__ ) if input_values is not None and input_ids is not None: raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" ) if input_values is None and input_ids is None and labels is None: raise ValueError( """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" ) if input_values is not None: __magic_name__ = self.feature_extractor.pad(a__ , *a__ , **a__ ) elif input_ids is not None: __magic_name__ = self.tokenizer.pad(a__ , **a__ ) else: __magic_name__ = None if labels is not None: if "input_ids" in labels or (isinstance(a__ , a__ ) and "input_ids" in labels[0]): __magic_name__ = self.tokenizer.pad(a__ , **a__ ) __magic_name__ = targets["input_ids"] else: __magic_name__ = self.feature_extractor.feature_size __magic_name__ = self.feature_extractor.num_mel_bins __magic_name__ = self.feature_extractor.pad(a__ , *a__ , **a__ ) __magic_name__ = feature_size_hack __magic_name__ = targets["input_values"] else: __magic_name__ = None if inputs is None: return targets if targets is not None: __magic_name__ = labels __magic_name__ = targets.get("""attention_mask""" ) if decoder_attention_mask is not None: __magic_name__ = decoder_attention_mask return inputs def _lowercase ( self : Dict , 
*UpperCamelCase__ : int , **UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*a__ , **a__ ) def _lowercase ( self : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> str: """simple docstring""" return self.tokenizer.decode(*a__ , **a__ )
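A minimal usage sketch for the processor above, assuming the `microsoft/speecht5_tts` checkpoint and its sentencepiece tokenizer are available; the `text` keyword takes the tokenizer path of `__call__`:

from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, world.", return_tensors="pt")
print(inputs["input_ids"].shape)  # (1, sequence_length)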
529
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class lowerCAmelCase : @staticmethod def _A ( *a__ : str , **a__ : List[str] ): '''simple docstring''' pass def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class lowerCAmelCase ( unittest.TestCase ): A_ : List[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _A ( self : List[Any] , a__ : int , a__ : Dict , a__ : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = DepthEstimationPipeline(model=a__ , image_processor=a__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _A ( self : int , a__ : Optional[int] , a__ : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , a__ ) import datasets lowerCAmelCase__ : List[str] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) lowerCAmelCase__ : List[str] = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , a__ , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def _A ( self : Optional[Any] ): '''simple docstring''' pass @slow @require_torch def _A ( self : Any ): '''simple docstring''' lowerCAmelCase__ : str = "Intel/dpt-large" lowerCAmelCase__ : Dict = pipeline("depth-estimation" , model=a__ ) lowerCAmelCase__ : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) lowerCAmelCase__ : Any = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def _A ( self : Any ): '''simple docstring''' self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
378
0
from math import factorial, radians


def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Wrap the angle to its equivalent in [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
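A quick numerical check of the series above against the standard library; with the default 18 terms the two values agree to the rounding precision:

from math import radians, sin

print(maclaurin_sin(10))            # Maclaurin-series approximation
print(round(sin(radians(10)), 10))  # reference value, ~0.1736481777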
15
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
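A runnable round-trip check, mirroring the commented tests above; XOR with the same key is its own inverse, so decrypting the ciphertext recovers the plaintext:

crypt = XORCipher()
key = 67
secret = crypt.encrypt_string("hallo welt", key)
print(crypt.decrypt_string(secret, key))  # "hallo welt"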
15
1
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : List[str] , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : Dict[str, int] = None , a_ : bool = True , **a_ : str , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Any = size if size is not None else {'shortest_edge': 224} SCREAMING_SNAKE_CASE__ : int = get_size_dict(a_ , default_to_square=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size if crop_size is not None else {'height': 256, 'width': 256} SCREAMING_SNAKE_CASE__ : str = get_size_dict(a_ , param_name='crop_size' ) SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize SCREAMING_SNAKE_CASE__ : str = size SCREAMING_SNAKE_CASE__ : str = resample SCREAMING_SNAKE_CASE__ : List[str] = do_rescale SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop SCREAMING_SNAKE_CASE__ : Tuple = crop_size SCREAMING_SNAKE_CASE__ : Optional[int] = do_flip_channel_order def __lowercase( self : str , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PIL.Image.BILINEAR , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , )-> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(a_ , default_to_square=a_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : List[Any] = get_resize_output_image_size(a_ , size=size['shortest_edge'] , default_to_square=a_ ) return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ ) def __lowercase( self : Any , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , )-> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(a_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(a_ , size=(size['height'], size['width']) , data_format=a_ , **a_ ) def __lowercase( self : Union[str, Any] , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , )-> Dict: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : Optional[Any] , a_ : np.ndarray , a_ : Optional[Union[str, ChannelDimension]] = None )-> np.ndarray: """simple docstring""" return flip_channel_order(a_ , data_format=a_ ) def __lowercase( self : int , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Dict[str, int] = None , a_ : bool = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : List[Any] , )-> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : List[str] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE__ : Any = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a_ , default_to_square=a_ ) SCREAMING_SNAKE_CASE__ : Dict = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(a_ , param_name='crop_size' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a_ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE__ : int = [self.center_crop(image=a_ , size=a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=a_ , scale=a_ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: SCREAMING_SNAKE_CASE__ : List[Any] = [self.flip_channel_order(image=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : int = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : List[Any] = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ ) def __lowercase( self : Optional[Any] , a_ : Any , a_ : List[Tuple] = None )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(a_ ) != len(a_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(a_ ): SCREAMING_SNAKE_CASE__ : Optional[int] = target_sizes.numpy() SCREAMING_SNAKE_CASE__ : Any = [] for idx in range(len(a_ ) ): SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=a_ ) SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(a_ ) else: SCREAMING_SNAKE_CASE__ : str = logits.argmax(dim=1 ) SCREAMING_SNAKE_CASE__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
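A usage sketch, assuming the class above is the MobileViT image processor from upstream transformers (the `flip_channel_order` step is the giveaway: those checkpoints expect BGR input); the class and default sizes below come from that assumption:

from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 256, "width": 256})
image = Image.new("RGB", (640, 480))  # stand-in for a real photo
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 256, 256): resized, center-cropped, rescaled, BGR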
85
"""simple docstring""" def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float ) -> float: if density <= 0: raise ValueError('Impossible fluid density' ) if bulk_modulus <= 0: raise ValueError('Impossible bulk modulus' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
549
0
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
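A hypothetical subcommand showing how the ABC above is meant to be used (the `BaseCLICommand` and `EnvCommand` names are illustrative reconstructions; the obfuscated original hid both): `register_subcommand` wires the command into a subparsers action, and `run` executes it.

from argparse import ArgumentParser


class EnvCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(parser):
        env_parser = parser.add_parser("env", help="Print environment info")
        env_parser.set_defaults(command=EnvCommand)

    def run(self):
        print("environment info goes here")


root = ArgumentParser("cli")
subcommands = root.add_subparsers()
EnvCommand.register_subcommand(subcommands)
args = root.parse_args(["env"])
args.command().run()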
185
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
185
1
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
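Illustrative conversions with the restored helpers above (outputs follow from the capitalize/upper/lower branches):

print(to_pascal_case("one two three"))        # OneTwoThree
print(to_camel_case("one two three"))         # oneTwoThree
print(to_snake_case("one two three", False))  # one_two_three
print(to_kebab_case("one two three", True))   # ONE-TWO-THREE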
360
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
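The candidates are differences of consecutive cubes, (k+1)³ − k³ = 3k² + 3k + 1, which is why the loop steps by 6·cube_index: consecutive differences grow by 6(k+1). A quick check of the first few candidates:

for k in range(1, 6):
    candidate = (k + 1) ** 3 - k ** 3
    print(candidate, is_prime(candidate))  # 7 True, 19 True, 37 True, 61 True, 91 False (91 = 7 * 13)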
360
1
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = """""" for i in table: res += inp[i - 1] return res def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return data[1:] + data[0] def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = """""" for i in range(len(lowerCamelCase__ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = int("""0b""" + data[0] + data[-1] , 2 ) lowerCAmelCase__ = int("""0b""" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = message[:4] lowerCAmelCase__ = message[4:] lowerCAmelCase__ = apply_table(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = xor(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = apply_sbox(lowerCamelCase__ , temp[:4] ) # noqa: E741 lowerCAmelCase__ = apply_sbox(lowerCamelCase__ , temp[4:] ) lowerCAmelCase__ = """0""" * (2 - len(lowerCamelCase__ )) + l # noqa: E741 lowerCAmelCase__ = """0""" * (2 - len(lowerCamelCase__ )) + r lowerCAmelCase__ = apply_table(l + r , lowerCamelCase__ ) lowerCAmelCase__ = xor(lowerCamelCase__ , lowerCamelCase__ ) return temp + right if __name__ == "__main__": __lowerCAmelCase : List[str] = input("Enter 10 bit key: ") __lowerCAmelCase : Optional[int] = input("Enter 8 bit message: ") __lowerCAmelCase : str = [6, 3, 7, 4, 8, 5, 10, 9] __lowerCAmelCase : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] __lowerCAmelCase : Tuple = [2, 4, 3, 1] __lowerCAmelCase : Dict = [2, 6, 3, 1, 4, 8, 5, 7] __lowerCAmelCase : str = [4, 1, 3, 5, 7, 2, 8, 6] __lowerCAmelCase : List[str] = [4, 1, 2, 3, 2, 3, 4, 1] __lowerCAmelCase : Union[str, Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __lowerCAmelCase : str = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __lowerCAmelCase : List[Any] = apply_table(key, paa_table) __lowerCAmelCase : Dict = temp[:5] __lowerCAmelCase : List[Any] = temp[5:] __lowerCAmelCase : int = left_shift(left) __lowerCAmelCase : Optional[Any] = left_shift(right) __lowerCAmelCase : List[Any] = apply_table(left + right, pa_table) __lowerCAmelCase : Tuple = left_shift(left) __lowerCAmelCase : Dict = left_shift(right) __lowerCAmelCase : Any = left_shift(left) __lowerCAmelCase : Optional[Any] = left_shift(right) __lowerCAmelCase : int = apply_table(left + right, pa_table) # encryption __lowerCAmelCase : Union[str, Any] = apply_table(message, IP) __lowerCAmelCase : Dict = function(expansion, sa, sa, keya, temp) __lowerCAmelCase : List[Any] = temp[4:] + temp[:4] __lowerCAmelCase : List[str] = function(expansion, sa, sa, keya, temp) __lowerCAmelCase : Any = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __lowerCAmelCase : Union[str, Any] = apply_table(CT, IP) __lowerCAmelCase : int = function(expansion, sa, sa, keya, temp) __lowerCAmelCase : List[str] = temp[4:] + temp[:4] __lowerCAmelCase : List[str] = function(expansion, sa, sa, keya, temp) __lowerCAmelCase : List[str] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
704
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
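A quick factoring sanity check, assuming the placeholder assignments in the loop above are restored to `tortoise`/`hare`/`divisor` and the function is named `pollard_rho`, as the `__main__` block and comments already indicate:

n = 8051  # = 83 * 97
d = pollard_rho(n)
if d is not None:
    print(f"{n} = {d} * {n // d}")  # prints one nontrivial factor and its cofactor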
674
0
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(A__ ) class __UpperCamelCase ( A__ ): def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): super().__init__(*_UpperCamelCase , **_UpperCamelCase ) self.check_model_type(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ): _UpperCAmelCase , _UpperCAmelCase = {}, {} if padding is not None: _UpperCAmelCase = padding if truncation is not None: _UpperCAmelCase = truncation if top_k is not None: _UpperCAmelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ): if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = {'''image''': image, '''question''': question} else: _UpperCAmelCase = image _UpperCAmelCase = super().__call__(_UpperCamelCase , **_UpperCamelCase ) return results def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ): _UpperCAmelCase = load_image(inputs['''image'''] ) _UpperCAmelCase = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase ) _UpperCAmelCase = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework ) model_inputs.update(_UpperCamelCase ) return model_inputs def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.model(**_UpperCamelCase ) return model_outputs def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=5 ): if top_k > self.model.config.num_labels: _UpperCAmelCase = self.model.config.num_labels if self.framework == "pt": _UpperCAmelCase = model_outputs.logits.sigmoid()[0] _UpperCAmelCase , _UpperCAmelCase = probs.topk(_UpperCamelCase ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) _UpperCAmelCase = scores.tolist() _UpperCAmelCase = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
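A minimal usage sketch for the pipeline above via the high-level `pipeline` factory; the checkpoint name and image URL are illustrative:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=2,
)
print(answers)  # e.g. [{"score": ..., "answer": "2"}, ...]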
32
from ..utils import DummyObject, requires_backends


# The original class name was lost to identifier mangling; ``LMSDiscreteScheduler``
# is the usual occupant of a torch+scipy dummy-objects module, but treat the name
# as an assumption.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
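What these dummies buy you in practice (a sketch, assuming torch or scipy is absent from the environment): instantiation fails fast with an ImportError that names the missing backends instead of an opaque ModuleNotFoundError from deep inside the library.

# Sketch: requires_backends raises an ImportError naming torch/scipy.
try:
    LMSDiscreteScheduler()
except ImportError as err:
    print(err)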
32
1
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case = logging.get_logger(__name__) snake_case = {'''vocab_file''': '''vocab.json'''} snake_case = { '''vocab_file''': { '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''', } } snake_case = {'''mgp-str''': 2_7} class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): A__ : Dict = VOCAB_FILES_NAMES A__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]="[GO]" , __lowerCamelCase : Union[str, Any]="[GO]" , __lowerCamelCase : int="[s]" , __lowerCamelCase : str="[GO]" , **__lowerCamelCase : str ): """simple docstring""" super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle: _snake_case = json.load(__lowerCamelCase ) _snake_case = {v: k for k, v in self.vocab.items()} @property def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" return len(self.vocab ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] ): """simple docstring""" _snake_case = [] for s in text: char_tokens.extend(__lowerCamelCase ) return char_tokens def __UpperCAmelCase ( self : int , __lowerCamelCase : Dict ): """simple docstring""" return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict ): """simple docstring""" return self.decoder.get(__lowerCamelCase ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(__lowerCamelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__lowerCamelCase ) ) return _snake_case = os.path.join( __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + '''\n''' ) return (vocab_file,)
404
"""simple docstring""" from collections.abc import Sequence from queue import Queue class UpperCAmelCase : def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None ): """simple docstring""" _snake_case = start _snake_case = end _snake_case = val _snake_case = (start + end) // 2 _snake_case = left _snake_case = right def __repr__( self : List[str] ): """simple docstring""" return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class UpperCAmelCase : def __init__( self : Dict , __lowerCamelCase : Sequence , __lowerCamelCase : Tuple ): """simple docstring""" _snake_case = collection _snake_case = function if self.collection: _snake_case = self._build_tree(0 , len(__lowerCamelCase ) - 1 ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict ): """simple docstring""" self._update_tree(self.root , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ): """simple docstring""" return self._query_range(self.root , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ): """simple docstring""" if start == end: return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.collection[start] ) _snake_case = (start + end) // 2 _snake_case = self._build_tree(__lowerCamelCase , __lowerCamelCase ) _snake_case = self._build_tree(mid + 1 , __lowerCamelCase ) return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.fn(left.val , right.val ) , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ): """simple docstring""" if node.start == i and node.end == i: _snake_case = val return if i <= node.mid: self._update_tree(node.left , __lowerCamelCase , __lowerCamelCase ) else: self._update_tree(node.right , __lowerCamelCase , __lowerCamelCase ) _snake_case = self.fn(node.left.val , node.right.val ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ): """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , __lowerCamelCase , __lowerCamelCase ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , __lowerCamelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , __lowerCamelCase ) , ) else: # range in right child tree return self._query_range(node.right , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" if self.root is not None: _snake_case = Queue() queue.put(self.root ) while not queue.empty(): _snake_case = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('''*''' * 5_0) snake_case = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
404
1
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain ``n`` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
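A sanity check (a sketch): F(12) = 144 is the first Fibonacci number with three digits.

assert solution(3) == 12  # F(12) = 144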
32
from math import sqrt


def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
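A small check (a sketch): the sixth prime is 13.

assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13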
32
1
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
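One edge case worth spelling out (a sketch): because max_excluding starts at 0, an all-negative input returns 0, i.e. the empty selection wins.

assert maximum_non_adjacent_sum([-1, -2, -3]) == 0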
709
def solution(limit: int = 28123) -> int:
    """
    Return the sum of all positive integers that cannot be written as the sum
    of two abundant numbers (Project Euler problem 23).
    """
    # sum_divs[n] holds the sum of the proper divisors of n.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
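For reference (a sketch; this takes a few seconds in CPython), the default limit reproduces the published Project Euler 23 answer:

assert solution() == 4179871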
383
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
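How the lazy-module pattern above behaves at runtime (a sketch; the exact public import path is an assumption and the model has since moved under models.deprecated): importing the package only builds the _LazyModule, and the torch-backed classes are resolved on first attribute access.

# Sketch: attribute access, not package import, triggers the heavy import.
from transformers import MCTCTConfig  # resolved lazily via _LazyModule

config = MCTCTConfig()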
302
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
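The per-pixel Python loop above is O(H*W) with heavy interpreter overhead; a hedged numpy alternative computes the same negative in one vectorized expression for 8-bit images:

import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # Equivalent to subtracting every channel value from 255, but vectorized.
    return 255 - img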
302
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
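A non-interactive sketch of the same computation, reusing ucal from above; the sample data is hypothetical. It evaluates Newton's forward formula f(value) ~= sum_i ucal(u, i) * delta^i(y0) / i! with u = (value - x0) / h.

import math


def newton_forward(x: list[float], y_values: list[float], value: float) -> float:
    # Build the forward difference table column by column.
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i, yv in enumerate(y_values):
        table[i][0] = yv
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += ucal(u, i) * table[0][i] / math.factorial(i)
    return summ


print(newton_forward([0, 1, 2, 3], [1, 2, 4, 8], 1.5))  # 2.8125 for this sample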
277
0
"""Tokenization classes for Bloom."""

import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
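A hedged usage sketch for the class above, loading one of the checkpoints listed in its pretrained map:

# Sketch: encode a sentence with the fast Bloom tokenizer.
tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
print(tok("Hello world")["input_ids"])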
316
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase_ : Dict = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , lowercase__ : Tuple , lowercase__ : Tuple=7 , lowercase__ : Any=3 , lowercase__ : Optional[Any]=18 , lowercase__ : int=30 , lowercase__ : Dict=400 , lowercase__ : List[Any]=None , lowercase__ : List[str]=True , lowercase__ : Optional[Any]=True , lowercase__ : Tuple=None , ) ->List[str]: '''simple docstring''' _UpperCamelCase : Dict = size if size is not None else {"height": 20, "width": 20} _UpperCamelCase : Optional[int] = parent _UpperCamelCase : str = batch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : Union[str, Any] = image_size _UpperCamelCase : Tuple = min_resolution _UpperCamelCase : Tuple = max_resolution _UpperCamelCase : List[Any] = size _UpperCamelCase : Dict = do_normalize _UpperCamelCase : Tuple = do_convert_rgb _UpperCamelCase : str = [512, 1_024, 2_048, 4_096] _UpperCamelCase : Optional[int] = patch_size if patch_size is not None else {"height": 16, "width": 16} def snake_case__ ( self : int ) ->Any: '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def snake_case__ ( self : Optional[int] ) ->Tuple: '''simple docstring''' _UpperCamelCase : str = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" _UpperCamelCase : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = PixaStructImageProcessor if is_vision_available() else None def snake_case__ ( self : Union[str, Any] ) ->Optional[int]: '''simple docstring''' _UpperCamelCase : Any = PixaStructImageProcessingTester(self ) @property def snake_case__ ( self : Any ) ->Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : int ) ->Union[str, Any]: '''simple docstring''' _UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) ) def snake_case__ ( self : int ) ->Tuple: '''simple docstring''' _UpperCamelCase : Union[str, Any] = self.image_processor_tester.prepare_dummy_image() _UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) _UpperCamelCase : int = 2_048 _UpperCamelCase : List[Any] = image_processor(lowercase__ , return_tensors="pt" , max_patches=lowercase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1e-3 , rtol=1e-3 ) ) def snake_case__ ( self : Optional[Any] ) ->Optional[Any]: '''simple docstring''' _UpperCamelCase : 
Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input _UpperCamelCase : Tuple = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCamelCase : Tuple = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCamelCase : List[str] = image_processor( lowercase__ , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def snake_case__ ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' _UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input _UpperCamelCase : Any = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 _UpperCamelCase : List[str] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowercase__ ): _UpperCamelCase : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches _UpperCamelCase : List[Any] = "Hello" _UpperCamelCase : List[str] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowercase__ , header_text=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCamelCase : Tuple = image_processor( lowercase__ , return_tensors="pt" , max_patches=lowercase__ , header_text=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def snake_case__ ( self : List[str] ) ->Optional[Any]: '''simple docstring''' _UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , np.ndarray ) _UpperCamelCase : Union[str, Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCamelCase : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCamelCase : Union[str, Any] = image_processor( lowercase__ , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , 
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def snake_case__ ( self : List[str] ) ->Dict: '''simple docstring''' _UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , torch.Tensor ) # Test not batched input _UpperCamelCase : Optional[int] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCamelCase : Tuple = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCamelCase : Any = image_processor( lowercase__ , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = PixaStructImageProcessor if is_vision_available() else None def snake_case__ ( self : Optional[Any] ) ->List[Any]: '''simple docstring''' _UpperCamelCase : int = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCamelCase : Optional[int] = 3 @property def snake_case__ ( self : Tuple ) ->List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : Optional[int] ) ->Tuple: '''simple docstring''' _UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) ) def snake_case__ ( self : List[Any] ) ->Union[str, Any]: '''simple docstring''' _UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input _UpperCamelCase : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCamelCase : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCamelCase : Tuple = image_processor( lowercase__ , return_tensors="pt" , max_patches=lowercase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
435
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Any: # initialize config if "resnet-50" in model_name: snake_case = ResNetConfig.from_pretrained("""microsoft/resnet-50""" ) elif "resnet-101" in model_name: snake_case = ResNetConfig.from_pretrained("""microsoft/resnet-101""" ) else: raise ValueError("""Model name should include either resnet50 or resnet101""" ) snake_case = DetrConfig(use_timm_backbone=__SCREAMING_SNAKE_CASE , backbone_config=__SCREAMING_SNAKE_CASE ) # set label attributes snake_case = """panoptic""" in model_name if is_panoptic: snake_case = 2_50 else: snake_case = 91 snake_case = """huggingface/label-files""" snake_case = """coco-detection-id2label.json""" snake_case = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} return config, is_panoptic def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Tuple: # here we list all keys to be renamed (original name on the left, our name on the right) snake_case = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") ) rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") ) rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") ) rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") ) rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( 
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) 
rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) return rename_keys def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]: snake_case = state_dict.pop(__SCREAMING_SNAKE_CASE ) snake_case = val def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]: snake_case = """""" if is_panoptic: snake_case = """detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[:2_56, :] snake_case = in_proj_bias[:2_56] snake_case = in_proj_weight[2_56:5_12, :] snake_case = in_proj_bias[2_56:5_12] snake_case = in_proj_weight[-2_56:, :] snake_case = in_proj_bias[-2_56:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention snake_case = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[:2_56, :] snake_case = in_proj_bias[:2_56] snake_case = in_proj_weight[2_56:5_12, :] snake_case = in_proj_bias[2_56:5_12] snake_case = in_proj_weight[-2_56:, :] snake_case = in_proj_bias[-2_56:] # read in weights + bias of input 
projection layer of cross-attention snake_case = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict snake_case = in_proj_weight_cross_attn[:2_56, :] snake_case = in_proj_bias_cross_attn[:2_56] snake_case = in_proj_weight_cross_attn[2_56:5_12, :] snake_case = in_proj_bias_cross_attn[2_56:5_12] snake_case = in_proj_weight_cross_attn[-2_56:, :] snake_case = in_proj_bias_cross_attn[-2_56:] def __lowerCamelCase ( ) -> str: snake_case = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=False ) -> str: snake_case , snake_case = get_detr_config(__SCREAMING_SNAKE_CASE ) # load original model from torch hub snake_case = { """detr-resnet-50""": """detr_resnet50""", """detr-resnet-101""": """detr_resnet101""", } logger.info(F'''Converting model {model_name}...''' ) snake_case = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=__SCREAMING_SNAKE_CASE ).eval() snake_case = detr.state_dict() # rename keys for src, dest in create_rename_keys(__SCREAMING_SNAKE_CASE ): if is_panoptic: snake_case = """detr.""" + src rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(__SCREAMING_SNAKE_CASE , is_panoptic=__SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case = """detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): snake_case = state_dict.pop(__SCREAMING_SNAKE_CASE ) snake_case = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case = state_dict.pop(__SCREAMING_SNAKE_CASE ) snake_case = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: snake_case = state_dict.pop(__SCREAMING_SNAKE_CASE ) snake_case = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): snake_case = state_dict.pop(__SCREAMING_SNAKE_CASE ) snake_case = val # finally, create HuggingFace model and load state dict snake_case = DetrForSegmentation(__SCREAMING_SNAKE_CASE ) if is_panoptic else DetrForObjectDetection(__SCREAMING_SNAKE_CASE ) model.load_state_dict(__SCREAMING_SNAKE_CASE ) model.eval() # verify our conversion on an image snake_case = """coco_panoptic""" if is_panoptic else """coco_detection""" snake_case = DetrImageProcessor(format=__SCREAMING_SNAKE_CASE ) snake_case = processor(images=prepare_img() , return_tensors="""pt""" ) snake_case = encoding["""pixel_values"""] snake_case = detr(__SCREAMING_SNAKE_CASE ) snake_case = model(__SCREAMING_SNAKE_CASE ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , 
original_outputs["""pred_masks"""] , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: # Upload model and image processor to the hub logger.info("""Uploading PyTorch model and image processor to the hub...""" ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") _SCREAMING_SNAKE_CASE = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
701
'''simple docstring''' import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING snake_case_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def lowerCAmelCase ( self : Tuple , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : List[Any] )-> Tuple: snake_case = AudioClassificationPipeline(model=__snake_case , feature_extractor=__snake_case ) # test with a raw waveform snake_case = np.zeros((3_40_00,) ) snake_case = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : str )-> Any: snake_case , snake_case = examples snake_case = audio_classifier(__snake_case ) # by default a model is initialized with num_labels=2 self.assertEqual( __snake_case , [ {"""score""": ANY(__snake_case ), """label""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """label""": ANY(__snake_case )}, ] , ) snake_case = audio_classifier(__snake_case , top_k=1 ) self.assertEqual( __snake_case , [ {"""score""": ANY(__snake_case ), """label""": ANY(__snake_case )}, ] , ) self.run_torchaudio(__snake_case ) @require_torchaudio def lowerCAmelCase ( self : Optional[Any] , __snake_case : Optional[Any] )-> List[Any]: import datasets # test with a local file snake_case = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) snake_case = dataset[0]["""audio"""]["""array"""] snake_case = audio_classifier(__snake_case ) self.assertEqual( __snake_case , [ {"""score""": ANY(__snake_case ), """label""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """label""": ANY(__snake_case )}, ] , ) @require_torch def lowerCAmelCase ( self : Tuple )-> Any: snake_case = """anton-l/wav2vec2-random-tiny-classifier""" snake_case = pipeline("""audio-classification""" , model=__snake_case ) snake_case = np.ones((80_00,) ) snake_case = audio_classifier(__snake_case , top_k=4 ) snake_case = [ {"""score""": 0.08_42, """label""": """no"""}, {"""score""": 0.08_38, """label""": """up"""}, {"""score""": 0.08_37, """label""": """go"""}, {"""score""": 0.08_34, """label""": """right"""}, ] snake_case = [ {"""score""": 0.08_45, """label""": """stop"""}, {"""score""": 0.08_44, """label""": """on"""}, {"""score""": 0.08_41, """label""": """right"""}, {"""score""": 0.08_34, """label""": """left"""}, ] self.assertIn(nested_simplify(__snake_case , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) snake_case = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate} snake_case = audio_classifier(__snake_case , top_k=4 ) self.assertIn(nested_simplify(__snake_case , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def lowerCAmelCase ( self : Union[str, Any] )-> Any: import datasets snake_case = """superb/wav2vec2-base-superb-ks""" snake_case = pipeline("""audio-classification""" , model=__snake_case ) snake_case = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" ) snake_case = np.array(dataset[3]["""speech"""] , 
dtype=np.floataa ) snake_case = audio_classifier(__snake_case , top_k=4 ) self.assertEqual( nested_simplify(__snake_case , decimals=3 ) , [ {"""score""": 0.9_81, """label""": """go"""}, {"""score""": 0.0_07, """label""": """up"""}, {"""score""": 0.0_06, """label""": """_unknown_"""}, {"""score""": 0.0_01, """label""": """down"""}, ] , ) @require_tf @unittest.skip("""Audio classification is not implemented for TF""" ) def lowerCAmelCase ( self : Tuple )-> int: pass
517
0
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } __snake_case = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } __snake_case = """</w>""" __snake_case = """@@ """ def __lowerCAmelCase ( lowercase : Tuple ) -> List[str]: """simple docstring""" snake_case : Union[str, Any] = set() snake_case : int = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case : Tuple = char return pairs # Speech2Text2 has no max input length __snake_case = {"""facebook/s2t-wav2vec2-large-en-de""": 1024} class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES __UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self , UpperCamelCase__ , UpperCamelCase__="<s>" , UpperCamelCase__="<pad>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__=False , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[Any]: '''simple docstring''' super().__init__( unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , **UpperCamelCase__ , ) snake_case : int = do_lower_case with open(UpperCamelCase__ , encoding="utf-8" ) as vocab_handle: snake_case : int = json.load(UpperCamelCase__ ) snake_case : List[str] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F'No merges files provided. {self.__class__.__name__} can only be used for decoding.' 
) snake_case : Any = None snake_case : Tuple = None else: with open(UpperCamelCase__ , encoding="utf-8" ) as merges_handle: snake_case : Union[str, Any] = merges_handle.read().split("\n" )[:-1] snake_case : List[str] = [tuple(merge.split()[:2] ) for merge in merges] snake_case : List[str] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) snake_case : Dict = {} @property def lowerCamelCase ( self ) -> int: '''simple docstring''' return len(self.decoder ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' snake_case : List[Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] snake_case : List[str] = get_pairs(UpperCamelCase__ ) if not pairs: return token while True: snake_case : Dict = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case ,snake_case : str = bigram snake_case : Tuple = [] snake_case : Optional[int] = 0 while i < len(UpperCamelCase__ ): try: snake_case : Tuple = word.index(UpperCamelCase__ , UpperCamelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case : str = j if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case : Any = tuple(UpperCamelCase__ ) snake_case : List[Any] = new_word if len(UpperCamelCase__ ) == 1: break else: snake_case : Optional[int] = get_pairs(UpperCamelCase__ ) snake_case : Optional[int] = " ".join(UpperCamelCase__ ) if word == "\n " + BPE_TOKEN_MERGES: snake_case : List[Any] = "\n" + BPE_TOKEN_MERGES if word.endswith(UpperCamelCase__ ): snake_case : str = word.replace(UpperCamelCase__ , "" ) snake_case : Dict = word.replace(" " , UpperCamelCase__ ) snake_case : List[str] = word return word def lowerCamelCase ( self , UpperCamelCase__ ) -> int: '''simple docstring''' if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." 
) if self.do_lower_case: snake_case : int = text.lower() snake_case : Union[str, Any] = text.split() snake_case : List[Any] = [] for token in text: if token: split_tokens.extend(list(self.bpe(UpperCamelCase__ ).split(" " ) ) ) return split_tokens def lowerCamelCase ( self , UpperCamelCase__ ) -> int: '''simple docstring''' return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) ) def lowerCamelCase ( self , UpperCamelCase__ ) -> str: '''simple docstring''' snake_case : Union[str, Any] = self.decoder.get(UpperCamelCase__ , self.unk_token ) return result def lowerCamelCase ( self , UpperCamelCase__ ) -> str: '''simple docstring''' snake_case : Tuple = " ".join(UpperCamelCase__ ) # make sure @@ tokens are concatenated snake_case : Optional[Any] = "".join(string.split(UpperCamelCase__ ) ) return string def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCamelCase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return snake_case : Optional[Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case : Optional[Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + "\n" ) snake_case : int = 0 if self.bpe_ranks is None: return (vocab_file,) with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.' " Please check that the tokenizer is not corrupted!" ) snake_case : Optional[int] = token_index writer.write(" ".join(UpperCamelCase__ ) + "\n" ) index += 1 return (vocab_file, merges_file)
178
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch __snake_case = logging.get_logger(__name__) class _lowerCAmelCase : def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[Any]: '''simple docstring''' if not conversation_id: snake_case : Union[str, Any] = uuid.uuida() if past_user_inputs is None: snake_case : Optional[Any] = [] if generated_responses is None: snake_case : Optional[Any] = [] snake_case : uuid.UUID = conversation_id snake_case : List[str] = past_user_inputs snake_case : List[str] = generated_responses snake_case : Optional[str] = text def __eq__( self , UpperCamelCase__ ) -> Any: '''simple docstring''' if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Dict: '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' F'with: "{text}".' ) snake_case : int = text else: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: snake_case : Any = text def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case : Dict = None def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]: '''simple docstring''' self.generated_responses.append(UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ) -> Union[str, Any]: '''simple docstring''' snake_case : Optional[Any] = F'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): snake_case : List[str] = "user" if is_user else "bot" output += F'{name} >> {text} \n' return output @add_end_docstrings( snake_case_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class _lowerCAmelCase ( snake_case_ ): def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str: '''simple docstring''' super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) if self.tokenizer.pad_token_id is None: snake_case : str = self.tokenizer.eos_token def lowerCamelCase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' snake_case : Tuple = {} snake_case : Optional[int] = {} snake_case : Optional[Any] = {} if min_length_for_response is not None: snake_case : int = min_length_for_response if minimum_tokens is not None: snake_case : Dict = minimum_tokens if "max_length" in generate_kwargs: snake_case : Any = generate_kwargs["max_length"] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case : Optional[Any] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCamelCase__ ) return preprocess_params, forward_params, postprocess_params def __call__( self , UpperCamelCase__ , UpperCamelCase__=0 , **UpperCamelCase__ ) -> List[str]: '''simple docstring''' snake_case : int = super().__call__(UpperCamelCase__ , num_workers=UpperCamelCase__ , **UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=32 ) -> Dict[str, Any]: '''simple docstring''' if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError("ConversationalPipeline, expects Conversation as inputs" ) if conversation.new_user_input is None: raise ValueError( F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
' "Add user inputs with the conversation's `add_user_input` method" ) if hasattr(self.tokenizer , "_build_conversation_input_ids" ): snake_case : Optional[Any] = self.tokenizer._build_conversation_input_ids(UpperCamelCase__ ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case : Optional[Any] = self._legacy_parse_and_tokenize(UpperCamelCase__ ) if self.framework == "pt": snake_case : Union[str, Any] = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case : Optional[int] = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=10 , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = generate_kwargs.get("max_length" , self.model.config.max_length ) snake_case : Optional[Any] = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(F'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) snake_case : List[str] = max_length - minimum_tokens snake_case : Dict = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: snake_case : List[Any] = model_inputs["attention_mask"][:, -trim:] snake_case : Union[str, Any] = model_inputs.pop("conversation" ) snake_case : Union[str, Any] = max_length snake_case : Any = self.model.generate(**UpperCamelCase__ , **UpperCamelCase__ ) if self.model.config.is_encoder_decoder: snake_case : Optional[int] = 1 else: snake_case : int = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=True ) -> Tuple: '''simple docstring''' snake_case : Union[str, Any] = model_outputs["output_ids"] snake_case : str = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ , ) snake_case : List[Any] = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(UpperCamelCase__ ) return conversation def lowerCamelCase ( self , UpperCamelCase__ ) -> Dict: '''simple docstring''' snake_case : str = self.tokenizer.eos_token_id snake_case : str = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) ) if len(UpperCamelCase__ ) > self.tokenizer.model_max_length: snake_case : str = input_ids[-self.tokenizer.model_max_length :] return input_ids
178
1
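A minimal usage sketch for the conversational pipeline above, driven through the high-level `pipeline` factory; the DialoGPT checkpoint is an assumption chosen purely for illustration, and any conversational checkpoint would work the same way.

from transformers import Conversation, pipeline

# Hypothetical checkpoint, picked only for illustration.
chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")

conversation = Conversation("What is the best way to learn Python?")
conversation = chatbot(conversation)  # marks the input processed and appends the reply
print(conversation.generated_responses[-1])

# Follow-up turns reuse the same Conversation object via add_user_input.
conversation.add_user_input("Any good books on the subject?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])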
"""simple docstring""" import operator as op def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->Any: '''simple docstring''' a : str = [] a : List[str] = lambda _lowercase , _lowercase : int(x / y ) # noqa: E731 integer division operation a : Union[str, Any] = { "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(_lowercase )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(_lowercase ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(_lowercase ) , sep=" | " ) else: a : Optional[Any] = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(_lowercase ) , sep=" | " ) a : List[str] = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(_lowercase ) , sep=" | " ) stack.append( str(opr[x](int(_lowercase ) , int(_lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(_lowercase ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": a : Optional[Any] = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''') print('''\n\tResult = ''', solve(Postfix))
31
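A quick check of the postfix evaluator above, using the cleaned-up names from that file: "4 5 * 2 +" means (4 * 5) + 2.

tokens = "4 5 * 2 +".split(" ")
assert solve(tokens) == 22  # also prints the step-by-step stack trace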
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file a : Optional[int] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def _SCREAMING_SNAKE_CASE ( _lowercase : Any=None ) ->Optional[Any]: '''simple docstring''' if subparsers is not None: a : int = subparsers.add_parser("tpu-config" , description=_description ) else: a : List[Any] = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description ) # Core arguments a : Dict = parser.add_argument_group( "Config Arguments" , "Arguments that can be configured through `accelerate config`." ) config_args.add_argument( "--config_file" , type=_lowercase , default=_lowercase , help="Path to the config file to use for accelerate." , ) config_args.add_argument( "--tpu_name" , default=_lowercase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , ) config_args.add_argument( "--tpu_zone" , default=_lowercase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , ) a : Any = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." ) pod_args.add_argument( "--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , ) pod_args.add_argument( "--command_file" , default=_lowercase , help="The path to the file containing the commands to run on the pod on startup." , ) pod_args.add_argument( "--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , ) pod_args.add_argument( "--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , ) pod_args.add_argument( "--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , ) pod_args.add_argument( "--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Tuple: '''simple docstring''' a : Union[str, Any] = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(_lowercase ): a : Optional[Any] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: a : int = defaults.command_file if not args.command and defaults.commands is not None: a : Union[str, Any] = defaults.commands if not args.tpu_name: a : int = defaults.tpu_name if not args.tpu_zone: a : Union[str, Any] = defaults.tpu_zone if args.accelerate_version == "dev": a : int = "git+https://github.com/huggingface/accelerate.git" elif args.accelerate_version == "latest": a : Optional[Any] = "accelerate -U" elif isinstance(parse(args.accelerate_version ) , _lowercase ): a : Optional[Any] = F"""accelerate=={args.accelerate_version}""" if not args.command_file and not args.command: raise ValueError("You must specify either a command file or a command to run on the pod." ) if args.command_file: with open(args.command_file , "r" ) as f: a : int = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , _lowercase ): a : Union[str, Any] = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate a : Tuple = ["cd /usr/share"] if args.install_accelerate: new_cmd += [F"""pip install {args.accelerate_version}"""] new_cmd += args.command a : List[Any] = "; ".join(_lowercase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess a : str = ["gcloud"] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F"""Running {' '.join(_lowercase )}""" ) return subprocess.run(_lowercase ) print("Successfully setup pod." ) def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : List[Any] = tpu_command_parser() a : Optional[int] = parser.parse_args() tpu_command_launcher(_lowercase )
31
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, 
FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
67
'''simple docstring'''
from collections import defaultdict


def dfs(start: int):
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count: int = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
527
0
"""simple docstring""" def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError('both inputs must be positive integers' ) lowercase__ : Optional[Any] = str(bin(_lowerCAmelCase ) ) binary_number += "0" * shift_amount return binary_number def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError('both inputs must be positive integers' ) lowercase__ : Optional[Any] = str(bin(_lowerCAmelCase ) )[2:] if shift_amount >= len(_lowerCAmelCase ): return "0b0" lowercase__ : int = binary_number[: len(_lowerCAmelCase ) - shift_amount] return "0b" + shifted_binary_number def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' if number >= 0: # Get binary representation of positive number lowercase__ : Tuple = '0' + str(bin(_lowerCAmelCase ) ).strip('-' )[2:] else: # Get binary (2's complement) representation of negative number lowercase__ : Tuple = len(bin(_lowerCAmelCase )[3:] ) # Find 2's complement of number lowercase__ : str = bin(abs(_lowerCAmelCase ) - (1 << binary_number_length) )[3:] lowercase__ : Union[str, Any] = ( '1' + '0' * (binary_number_length - len(_lowerCAmelCase )) + binary_number ) if shift_amount >= len(_lowerCAmelCase ): return "0b" + binary_number[0] * len(_lowerCAmelCase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(_lowerCAmelCase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
645
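Expected behaviour of the three shift helpers above (cleaned-up names), checked against small two's-complement examples:

assert logical_left_shift(0b1101, 1) == "0b11010"       # 13 << 1
assert logical_right_shift(0b1101, 1) == "0b110"        # 13 >> 1, zero fill
assert arithmetic_right_shift(0b0100, 1) == "0b0010"    # 4 >> 1, sign bit 0
assert arithmetic_right_shift(-0b0100, 1) == "0b1110"   # -4 >> 1, sign bit 1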
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : Dict = value elif weight_type == "weight_v": lowercase__ : List[str] = value elif weight_type == "bias": lowercase__ : Optional[Any] = value else: lowercase__ : List[str] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = [] lowercase__ : List[str] = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue lowercase__ : int = True if "*" in mapped_key: lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' 
)[-2] lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : List[Any] = 'weight_g' elif "weight_v" in name: lowercase__ : int = 'weight_v' elif "bias" in name: lowercase__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Union[str, Any] = 'weight' else: lowercase__ : int = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : int = full_name.split('conv_layers.' )[-1] lowercase__ : int = name.split('.' ) lowercase__ : int = int(items[0] ) lowercase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase__ : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ): '''simple docstring''' if config_path is not None: lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase ) else: lowercase__ : Any = UniSpeechSatConfig() lowercase__ : Union[str, Any] = '' if is_finetuned: lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase ) else: lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if 
__name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
645
1
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration UpperCAmelCase__ : str = 5_00_00 UpperCAmelCase__ : int = 50_00 UpperCAmelCase__ , UpperCAmelCase__ : Tuple = os.path.split(__file__) UpperCAmelCase__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def A ( snake_case__ : str , snake_case__ : Dict ) -> Tuple: '''simple docstring''' for i in range(snake_case__ ): __snake_case = dataset[i] @get_duration def A ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int ) -> List[Any]: '''simple docstring''' for i in range(0 , len(snake_case__ ) , snake_case__ ): __snake_case = dataset[i : i + batch_size] @get_duration def A ( snake_case__ : Dict , snake_case__ : str , snake_case__ : int ) -> List[str]: '''simple docstring''' with dataset.formatted_as(type=snake_case__ ): for i in range(snake_case__ ): __snake_case = dataset[i] @get_duration def A ( snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str] ) -> Union[str, Any]: '''simple docstring''' with dataset.formatted_as(type=snake_case__ ): for i in range(0 , snake_case__ , snake_case__ ): __snake_case = dataset[i : i + batch_size] def A ( ) -> Optional[int]: '''simple docstring''' __snake_case = {"""num examples""": SPEED_TEST_N_EXAMPLES} __snake_case = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}), ] __snake_case = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) __snake_case = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) __snake_case = generate_example_dataset( os.path.join(snake_case__ , 'dataset.arrow' ) , snake_case__ , num_examples=snake_case__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(snake_case__ ) ) __snake_case = func(snake_case__ , **snake_case__ ) print('shuffling dataset' ) __snake_case = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(snake_case__ ) ) 
__snake_case = func( snake_case__ , **snake_case__ ) with open(snake_case__ , 'wb' ) as f: f.write(json.dumps(snake_case__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
313
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _UpperCAmelCase = get_logger(__name__) def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=0 ): os.makedirs(lowercase , exist_ok=lowercase ) with FSDP.state_dict_type( lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): SCREAMING_SNAKE_CASE_: List[Any] =model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: SCREAMING_SNAKE_CASE_: str =f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' SCREAMING_SNAKE_CASE_: List[Any] =os.path.join(lowercase , lowercase ) if accelerator.process_index == 0: logger.info(f'''Saving model to {output_model_file}''' ) torch.save(lowercase , lowercase ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: SCREAMING_SNAKE_CASE_: Dict =( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , lowercase ) logger.info(f'''Saving model to {output_model_file}''' ) torch.save(lowercase , lowercase ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , f'''{MODEL_NAME}_{model_index}''' ) os.makedirs(lowercase , exist_ok=lowercase ) logger.info(f'''Saving model to {ckpt_dir}''' ) SCREAMING_SNAKE_CASE_: Dict ={"""model""": state_dict} dist_cp.save_state_dict( state_dict=lowercase , storage_writer=dist_cp.FileSystemWriter(lowercase ) , planner=DefaultSavePlanner() , ) logger.info(f'''Model saved to {ckpt_dir}''' ) def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(lowercase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """ """initializing FSDP object""" ) return SCREAMING_SNAKE_CASE_: List[Any] =f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , lowercase ) logger.info(f'''Loading model from {input_model_file}''' ) SCREAMING_SNAKE_CASE_: List[Any] =torch.load(lowercase ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: SCREAMING_SNAKE_CASE_: Dict =( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else 
f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , lowercase ) logger.info(f'''Loading model from {input_model_file}''' ) SCREAMING_SNAKE_CASE_: int =torch.load(lowercase ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: SCREAMING_SNAKE_CASE_: Optional[Any] =( os.path.join(lowercase , f'''{MODEL_NAME}_{model_index}''' ) if f'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading model from {ckpt_dir}''' ) SCREAMING_SNAKE_CASE_: List[Any] ={"""model""": model.state_dict()} dist_cp.load_state_dict( state_dict=lowercase , storage_reader=dist_cp.FileSystemReader(lowercase ) , planner=DefaultLoadPlanner() , ) SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict["""model"""] logger.info(f'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(lowercase ) def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0 ): os.makedirs(lowercase , exist_ok=lowercase ) with FSDP.state_dict_type( lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): SCREAMING_SNAKE_CASE_: Optional[int] =FSDP.optim_state_dict(lowercase , lowercase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: SCREAMING_SNAKE_CASE_: Optional[int] =( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) SCREAMING_SNAKE_CASE_: Tuple =os.path.join(lowercase , lowercase ) logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(lowercase , lowercase ) logger.info(f'''Optimizer state saved in {output_optimizer_file}''' ) else: SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(lowercase , exist_ok=lowercase ) logger.info(f'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowercase ) , planner=DefaultSavePlanner() , ) logger.info(f'''Optimizer state saved in {ckpt_dir}''' ) def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: SCREAMING_SNAKE_CASE_: int =None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: SCREAMING_SNAKE_CASE_: Tuple =( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowercase , lowercase ) logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' ) SCREAMING_SNAKE_CASE_: Optional[Any] =torch.load(lowercase ) logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' ) else: SCREAMING_SNAKE_CASE_: str =( os.path.join(lowercase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if f'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading Optimizer from {ckpt_dir}''' ) SCREAMING_SNAKE_CASE_: Any =load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , 
optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(lowercase ) , ) SCREAMING_SNAKE_CASE_: Any =optim_state["""optimizer"""] logger.info(f'''Optimizer loaded from {ckpt_dir}''' ) SCREAMING_SNAKE_CASE_: Tuple =FSDP.optim_state_dict_to_load(lowercase , lowercase , lowercase ) optimizer.load_state_dict(lowercase )
409
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = ['image_processor', 'tokenizer'] UpperCamelCase = 'CLIPImageProcessor' UpperCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Union[str, Any] , A_ : str=None , A_ : Union[str, Any]=None , **A_ : Optional[int] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCAmelCase__ , ) lowerCamelCase_ = kwargs.pop('feature_extractor' ) lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCAmelCase__ , UpperCAmelCase__ ) def __call__( self : Any , A_ : Dict=None , A_ : Union[str, Any]=None , A_ : int=None , **A_ : Union[str, Any] ) -> int: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: lowerCamelCase_ = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if images is not None: lowerCamelCase_ = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if text is not None and images is not None: lowerCamelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ ) def a__ ( self : List[str] , *A_ : Dict , **A_ : int ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) def a__ ( self : Optional[int] , *A_ : Optional[Any] , **A_ : str ) -> Optional[Any]: """simple docstring""" return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) @property def a__ ( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = self.tokenizer.model_input_names lowerCamelCase_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def a__ ( self : Tuple ) -> Any: """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase__ , ) return self.image_processor_class @property def a__ ( self : Any ) -> str: """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase__ , ) return self.image_processor
718
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    '''simple docstring'''

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib_moves, coins_distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
651
0
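A worked example for the coin-distribution solver above (cleaned-up names): a root holding 3 coins with two empty leaves needs exactly 2 moves, one coin pushed down to each leaf.

root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2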
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6}, } } __lowerCamelCase = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 128, '''task_specific_params.summarization.min_length''': 12, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 142, '''task_specific_params.summarization_cnn.min_length''': 56, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 62, '''task_specific_params.summarization_xsum.min_length''': 11, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(__UpperCAmelCase ) , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , x.transpose() ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , transpose(__UpperCAmelCase ).numpy() ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , transpose(__UpperCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , transpose(__UpperCAmelCase ).numpy() ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , transpose(__UpperCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , np.asarray(transpose(__UpperCAmelCase ) ) ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase ( self ): 
'''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , np.reshape(__UpperCAmelCase , (4, 3) ) ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , np.reshape(__UpperCAmelCase , (12, 5) ) ) ) @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , reshape(__UpperCAmelCase , (4, 3) ).numpy() ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , reshape(__UpperCAmelCase , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , reshape(__UpperCAmelCase , (4, 3) ).numpy() ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , reshape(__UpperCAmelCase , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , np.asarray(reshape(__UpperCAmelCase , (4, 3) ) ) ) ) __lowerCamelCase = np.random.randn(3 , 4 , 5 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , np.asarray(reshape(__UpperCAmelCase , (12, 5) ) ) ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , np.squeeze(__UpperCAmelCase ) ) ) __lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , np.squeeze(__UpperCAmelCase , axis=2 ) ) ) @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(1 , 3 , 4 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , squeeze(__UpperCAmelCase ).numpy() ) ) __lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , squeeze(__UpperCAmelCase , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(1 , 3 , 4 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , squeeze(__UpperCAmelCase ).numpy() ) ) __lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , squeeze(__UpperCAmelCase , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(1 , 3 , 4 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , np.asarray(squeeze(__UpperCAmelCase ) ) ) ) __lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , 
np.asarray(squeeze(__UpperCAmelCase , axis=2 ) ) ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , np.expand_dims(__UpperCAmelCase , axis=1 ) ) ) @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = torch.tensor(__UpperCAmelCase ) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , expand_dims(__UpperCAmelCase , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = tf.constant(__UpperCAmelCase ) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , expand_dims(__UpperCAmelCase , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = np.random.randn(3 , 4 ) __lowerCamelCase = jnp.array(__UpperCAmelCase ) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , np.asarray(expand_dims(__UpperCAmelCase , axis=1 ) ) ) )
175
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=1 , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = q_groups __lowerCamelCase = k_groups __lowerCamelCase = v_groups __lowerCamelCase = post_attention_groups __lowerCamelCase = intermediate_groups __lowerCamelCase = output_groups def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = SqueezeBertModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = SqueezeBertForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = SqueezeBertForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = SqueezeBertForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = SqueezeBertForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = SqueezeBertForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def 
lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ((__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase)) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase__ = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": SqueezeBertForTokenClassification, """zero-shot""": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = True lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SqueezeBertModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , dim=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = SqueezeBertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) __lowerCamelCase = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 3) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] ) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
175
1
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: speed of sound = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
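A quick usage sketch for the formula above; the material constants are approximate textbook values for water, not figures from the original file.

# Usage sketch (approximate reference values for water):
water_density = 1_000.0       # kg/m^3
water_bulk_modulus = 2.1e9    # Pa
print(speed_of_sound_in_a_fluid(water_density, water_bulk_modulus))  # ~1449 m/s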
708
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Remove the last digit from the number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
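A small worked example for the routine above: 10! = 3628800, whose digits sum to 3+6+2+8+8+0+0 = 27.

# Sanity check of the three helpers:
assert factorial(10) == 3628800
assert split_and_add(3628800) == 27
assert solution(10) == 27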
403
0
import socket


def main() -> None:
    """Connect to a local server on port 12312 and save the received bytes to a file."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
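A minimal server counterpart, sketched only so the client above can be exercised; the filename and single-connection behavior are assumptions mirroring the client, not code from the original repo.

import socket

def serve_once(filename: str = "file_to_send.bin", port: int = 12312) -> None:
    # Accept one connection and stream `filename` to it in 1 KiB chunks.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's "Hello server!" greeting
    with open(filename, "rb") as f:
        while chunk := f.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()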
168
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
168
1
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Optional[int] , __a : Optional[int]=None , __a : Optional[int]=None , __a : Union[str, Any]=None , **__a : int ) -> Dict: '''simple docstring''' if tokenize_kwargs is None: __snake_case : List[str] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) __snake_case : List[Any] = truncation __snake_case : Tuple = tokenize_kwargs __snake_case : Optional[int] = {} if return_tensors is not None: __snake_case : str = return_tensors return preprocess_params, {}, postprocess_params def A_ ( self : Tuple , __a : Optional[int] , **__a : int ) -> Dict[str, GenericTensor]: '''simple docstring''' __snake_case : int = self.framework __snake_case : List[str] = self.tokenizer(__a , return_tensors=__a , **__a ) return model_inputs def A_ ( self : List[Any] , __a : Tuple ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = self.model(**__a ) return model_outputs def A_ ( self : List[str] , __a : Tuple , __a : Union[str, Any]=False ) -> Dict: '''simple docstring''' # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Optional[int] , *__a : str , **__a : List[Any] ) -> Union[str, Any]: '''simple docstring''' return super().__call__(*__a , **__a )
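For context, a hedged usage sketch of the feature-extraction pipeline this file implements; the checkpoint name is an illustrative choice, not one mandated by the file.

# Usage sketch (model choice is illustrative):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test")
# `features` is a nested list shaped [batch, tokens, hidden_size]
print(len(features[0]), len(features[0][0]))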
124
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) A__ : Union[str, Any] = logging.getLogger(__name__) A__ : List[str] = tf.data.AUTOTUNE def a_ ( ) -> Tuple: __snake_case : str = argparse.ArgumentParser(description='Train a masked language model on TPU.' ) parser.add_argument( '--pretrained_model_config' ,type=_UpperCAmelCase ,default='roberta-base' ,help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' ,) parser.add_argument( '--tokenizer' ,type=_UpperCAmelCase ,default='unigram-tokenizer-wikitext' ,help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' ,) parser.add_argument( '--per_replica_batch_size' ,type=_UpperCAmelCase ,default=8 ,help='Batch size per TPU core.' ,) parser.add_argument( '--no_tpu' ,action='store_true' ,help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' ,) parser.add_argument( '--tpu_name' ,type=_UpperCAmelCase ,help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' ,default='local' ,) parser.add_argument( '--tpu_zone' ,type=_UpperCAmelCase ,help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' ,) parser.add_argument( '--gcp_project' ,type=_UpperCAmelCase ,help='Google cloud project name. Only used for non-Colab TPU nodes.' ) parser.add_argument( '--bfloat16' ,action='store_true' ,help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' ,) parser.add_argument( '--train_dataset' ,type=_UpperCAmelCase ,help='Path to training dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' ,) parser.add_argument( '--shuffle_buffer_size' ,type=_UpperCAmelCase ,default=2**18 ,help='Size of the shuffle buffer (in samples)' ,) parser.add_argument( '--eval_dataset' ,type=_UpperCAmelCase ,help='Path to evaluation dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' ,) parser.add_argument( '--num_epochs' ,type=_UpperCAmelCase ,default=1 ,help='Number of epochs to train for.' ,) parser.add_argument( '--learning_rate' ,type=_UpperCAmelCase ,default=1E-4 ,help='Learning rate to use for training.' ,) parser.add_argument( '--weight_decay_rate' ,type=_UpperCAmelCase ,default=1E-3 ,help='Weight decay rate to use for training.' ,) parser.add_argument( '--max_length' ,type=_UpperCAmelCase ,default=5_12 ,help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' ,) parser.add_argument( '--mlm_probability' ,type=_UpperCAmelCase ,default=0.1_5 ,help='Fraction of tokens to mask during training.' ,) parser.add_argument('--output_dir' ,type=_UpperCAmelCase ,required=_UpperCAmelCase ,help='Path to save model checkpoints to.' ) parser.add_argument('--hub_model_id' ,type=_UpperCAmelCase ,help='Model ID to upload to on the Hugging Face Hub.' 
) __snake_case : Dict = parser.parse_args() return args def a_ ( _UpperCAmelCase : int ) -> str: try: if args.tpu_name: __snake_case : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name ,zone=args.tpu_zone ,project=args.gcp_project ) else: __snake_case : Any = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( 'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ' '--gcp_project. When running on a TPU VM, use --tpu_name local.' ) tf.config.experimental_connect_to_cluster(_UpperCAmelCase ) tf.tpu.experimental.initialize_tpu_system(_UpperCAmelCase ) return tpu def a_ ( _UpperCAmelCase : int ) -> Optional[int]: __snake_case : Union[str, Any] = 0 for file in file_list: __snake_case : Optional[int] = file.split('/' )[-1] __snake_case : Optional[Any] = re.search(r'-\d+-(\d+)\.tfrecord' ,_UpperCAmelCase ).group(1 ) __snake_case : Any = int(_UpperCAmelCase ) num_samples += sample_count return num_samples def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple=None ) -> Dict: __snake_case : int = count_samples(_UpperCAmelCase ) __snake_case : Optional[Any] = tf.data.Dataset.from_tensor_slices(_UpperCAmelCase ) if shuffle: __snake_case : Optional[Any] = dataset.shuffle(len(_UpperCAmelCase ) ) __snake_case : Dict = tf.data.TFRecordDataset(_UpperCAmelCase ,num_parallel_reads=_UpperCAmelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here __snake_case : Any = dataset.apply(tf.data.experimental.assert_cardinality(_UpperCAmelCase ) ) __snake_case : Dict = dataset.map(_UpperCAmelCase ,num_parallel_calls=_UpperCAmelCase ) if shuffle: assert shuffle_buffer_size is not None __snake_case : Optional[Any] = dataset.shuffle(args.shuffle_buffer_size ) __snake_case : Any = dataset.batch(_UpperCAmelCase ,drop_remainder=_UpperCAmelCase ) __snake_case : int = dataset.map(_UpperCAmelCase ,num_parallel_calls=_UpperCAmelCase ) __snake_case : Optional[int] = dataset.prefetch(_UpperCAmelCase ) return dataset def a_ ( _UpperCAmelCase : int ) -> List[Any]: if not args.no_tpu: __snake_case : Tuple = initialize_tpu(_UpperCAmelCase ) __snake_case : Optional[Any] = tf.distribute.TPUStrategy(_UpperCAmelCase ) else: __snake_case : Optional[int] = tf.distribute.OneDeviceStrategy(device='/gpu:0' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' ) __snake_case : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer ) __snake_case : int = AutoConfig.from_pretrained(args.pretrained_model_config ) __snake_case : List[Any] = tokenizer.vocab_size __snake_case : Any = tf.io.gfile.glob(os.path.join(args.train_dataset ,'*.tfrecord' ) ) if not training_records: raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' ) __snake_case : List[str] = tf.io.gfile.glob(os.path.join(args.eval_dataset ,'*.tfrecord' ) ) if not eval_records: raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' ) __snake_case : Optional[int] = count_samples(_UpperCAmelCase ) __snake_case : Optional[Any] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) __snake_case : Dict = steps_per_epoch * args.num_epochs with strategy.scope(): __snake_case : int = TFAutoModelForMaskedLM.from_config(_UpperCAmelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the 
weights are built __snake_case , __snake_case : int = create_optimizer( num_train_steps=_UpperCAmelCase ,num_warmup_steps=total_train_steps // 20 ,init_lr=args.learning_rate ,weight_decay_rate=args.weight_decay_rate ,) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_UpperCAmelCase ,metrics=['accuracy'] ) def decode_fn(_UpperCAmelCase : Optional[Any] ): __snake_case : Optional[Any] = { 'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa ,shape=(args.max_length,) ), 'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa ,shape=(args.max_length,) ), } return tf.io.parse_single_example(_UpperCAmelCase ,_UpperCAmelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. __snake_case : Optional[Any] = DataCollatorForLanguageModeling( tokenizer=_UpperCAmelCase ,mlm_probability=args.mlm_probability ,mlm=_UpperCAmelCase ,return_tensors='tf' ) def mask_with_collator(_UpperCAmelCase : List[str] ): # TF really needs an isin() function __snake_case : int = ( ~tf.cast(batch['attention_mask'] ,tf.bool ) | (batch['input_ids'] == tokenizer.cls_token_id) | (batch['input_ids'] == tokenizer.sep_token_id) ) __snake_case , __snake_case : str = data_collator.tf_mask_tokens( batch['input_ids'] ,vocab_size=len(_UpperCAmelCase ) ,mask_token_id=tokenizer.mask_token_id ,special_tokens_mask=_UpperCAmelCase ,) return batch __snake_case : int = args.per_replica_batch_size * strategy.num_replicas_in_sync __snake_case : Optional[int] = prepare_dataset( _UpperCAmelCase ,decode_fn=_UpperCAmelCase ,mask_fn=_UpperCAmelCase ,batch_size=_UpperCAmelCase ,shuffle=_UpperCAmelCase ,shuffle_buffer_size=args.shuffle_buffer_size ,) __snake_case : List[Any] = prepare_dataset( _UpperCAmelCase ,decode_fn=_UpperCAmelCase ,mask_fn=_UpperCAmelCase ,batch_size=_UpperCAmelCase ,shuffle=_UpperCAmelCase ,) __snake_case : Tuple = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir ,hub_model_id=args.hub_model_id ,tokenizer=_UpperCAmelCase ) ) model.fit( _UpperCAmelCase ,validation_data=_UpperCAmelCase ,epochs=args.num_epochs ,callbacks=_UpperCAmelCase ,) model.save_pretrained(args.output_dir ) if __name__ == "__main__": A__ : List[str] = parse_args() main(args)
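A self-contained demo of the shard-counting convention the script relies on: each .tfrecord filename is expected to end with -<shard>-<num_samples>.tfrecord, and count_samples sums the second number across files. The filename below is a made-up example.

import re

example = "wikitext-00003-04096.tfrecord"
match = re.search(r"-\d+-(\d+)\.tfrecord", example)
assert match is not None and int(match.group(1)) == 4096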
124
1
import glob import os import random from string import ascii_lowercase, digits import cva a__ : List[str] = '' a__ : Optional[int] = '' a__ : int = '' a__ : List[Any] = 1 # (0 is vertical, 1 is horizontal) def UpperCAmelCase_ ( ) -> None: '''simple docstring''' A_ = get_dataset(_UpperCAmelCase , _UpperCAmelCase ) print('''Processing...''' ) A_ = update_image_and_anno(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for index, image in enumerate(_UpperCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' A_ = random_chars(32 ) A_ = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] A_ = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(f'/{file_root}.jpg' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Success {index+1}/{len(_UpperCAmelCase )} with {file_name}' ) A_ = [] for anno in new_annos[index]: A_ = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(_UpperCAmelCase ) with open(f'/{file_root}.txt' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def UpperCAmelCase_ ( _UpperCAmelCase :Tuple , _UpperCAmelCase :Any ) -> tuple[list, list]: '''simple docstring''' A_ = [] A_ = [] for label_file in glob.glob(os.path.join(_UpperCAmelCase , '''*.txt''' ) ): A_ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(_UpperCAmelCase ) as in_file: A_ = in_file.readlines() A_ = os.path.join(_UpperCAmelCase , f'{label_name}.jpg' ) A_ = [] for obj_list in obj_lists: A_ = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_UpperCAmelCase ) labels.append(_UpperCAmelCase ) return img_paths, labels def UpperCAmelCase_ ( _UpperCAmelCase :Optional[Any] , _UpperCAmelCase :Union[str, Any] , _UpperCAmelCase :str = 1 ) -> tuple[list, list, list]: '''simple docstring''' A_ = [] A_ = [] A_ = [] for idx in range(len(_UpperCAmelCase ) ): A_ = [] A_ = img_list[idx] path_list.append(_UpperCAmelCase ) A_ = anno_list[idx] A_ = cva.imread(_UpperCAmelCase ) if flip_type == 1: A_ = cva.flip(_UpperCAmelCase , _UpperCAmelCase ) for bbox in img_annos: A_ = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: A_ = cva.flip(_UpperCAmelCase , _UpperCAmelCase ) for bbox in img_annos: A_ = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_UpperCAmelCase ) new_imgs_list.append(_UpperCAmelCase ) return new_imgs_list, new_annos_lists, path_list def UpperCAmelCase_ ( _UpperCAmelCase :int = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" A_ = ascii_lowercase + digits return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) ) if __name__ == "__main__": main() print('DONE ✅')
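The bounding-box arithmetic above is easy to check in isolation: for YOLO-format annotations (class, x_center, y_center, w, h, all normalized to [0, 1]), a horizontal flip replaces x_center with 1 - x_center and a vertical flip replaces y_center with 1 - y_center. The box values below are made up for the check.

box = [0, 0.25, 0.40, 0.10, 0.20]  # class, x_c, y_c, w, h
h_flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]
v_flipped = [box[0], box[1], 1 - box[2], box[3], box[4]]
assert h_flipped[1] == 0.75 and v_flipped[2] == 0.60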
188
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS over the state-space tree, pruning branches that overshoot max_sum or
    can no longer reach it with the remaining numbers."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
217
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __UpperCamelCase ( unittest.TestCase ): def __A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase_ = tempfile.mkdtemp() # fmt: off UpperCAmelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCAmelCase_ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) UpperCAmelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCAmelCase_ = {"unk_token": "<unk>"} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase ) ) UpperCAmelCase_ = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } UpperCAmelCase_ = os.path.join(self.tmpdirname , lowerCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(lowerCAmelCase , lowerCAmelCase ) def __A ( self : Optional[Any] , **lowerCAmelCase : Any ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def __A ( self : Tuple , **lowerCAmelCase : List[Any] ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def __A ( self : Union[str, Any] , **lowerCAmelCase : int ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def __A ( self : str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): '''simple docstring''' UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase_ = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Tuple ): '''simple docstring''' UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = self.get_image_processor() UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase ) UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase 
) self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase ) def __A ( self : Tuple ): '''simple docstring''' UpperCAmelCase_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase_ = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 ) UpperCAmelCase_ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase ) def __A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = self.get_image_processor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) UpperCAmelCase_ = self.prepare_image_inputs() UpperCAmelCase_ = image_processor(lowerCAmelCase , return_tensors="np" ) UpperCAmelCase_ = processor(images=lowerCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : Tuple ): '''simple docstring''' UpperCAmelCase_ = self.get_image_processor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) UpperCAmelCase_ = "lower newer" UpperCAmelCase_ = processor(text=lowerCAmelCase ) UpperCAmelCase_ = tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Any ): '''simple docstring''' UpperCAmelCase_ = self.get_image_processor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) UpperCAmelCase_ = "lower newer" UpperCAmelCase_ = self.prepare_image_inputs() UpperCAmelCase_ = processor(text=lowerCAmelCase , images=lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase ): processor() def __A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ = self.get_image_processor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase_ = processor.batch_decode(lowerCAmelCase ) UpperCAmelCase_ = tokenizer.batch_decode(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def __A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase_ = self.get_image_processor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase ) UpperCAmelCase_ = 
"lower newer" UpperCAmelCase_ = self.prepare_image_inputs() UpperCAmelCase_ = processor(text=lowerCAmelCase , images=lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
268
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
268
1
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Return the digit sum of the numerator of the max_n-th convergent of the
    continued fraction for e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
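The loop above is the standard convergent recurrence h_k = a_k * h_(k-1) + h_(k-2) applied to e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]. Project Euler 65 states that the tenth convergent is 1457/536, so 1+4+5+7 = 17 makes a handy regression check.

assert solution(10) == 17  # numerator 1457, digit sum 17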
585
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowerCAmelCase = logging.get_logger(__name__) # General docstring __lowerCAmelCase = 'MobileNetV1Config' # Base docstring __lowerCAmelCase = 'google/mobilenet_v1_1.0_224' __lowerCAmelCase = [1, 1_024, 7, 7] # Image classification docstring __lowerCAmelCase = 'google/mobilenet_v1_1.0_224' __lowerCAmelCase = 'tabby, tabby cat' __lowerCAmelCase = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): _snake_case = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = model.mobilenet_va else: _snake_case = model _snake_case = """MobilenetV1/Conv2d_0/""" _snake_case = backbone.conv_stem.convolution.weight _snake_case = backbone.conv_stem.normalization.bias _snake_case = backbone.conv_stem.normalization.weight _snake_case = backbone.conv_stem.normalization.running_mean _snake_case = backbone.conv_stem.normalization.running_var for i in range(13 ): _snake_case = i + 1 _snake_case = i * 2 _snake_case = backbone.layer[pt_index] _snake_case = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" _snake_case = pointer.convolution.weight _snake_case = pointer.normalization.bias _snake_case = pointer.normalization.weight _snake_case = pointer.normalization.running_mean _snake_case = pointer.normalization.running_var _snake_case = backbone.layer[pt_index + 1] _snake_case = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" _snake_case = pointer.convolution.weight _snake_case = pointer.normalization.bias _snake_case = pointer.normalization.weight _snake_case = pointer.normalization.running_mean _snake_case = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = """MobilenetV1/Logits/Conv2d_1c_1x1/""" _snake_case = model.classifier.weight _snake_case = model.classifier.bias return tf_to_pt_map def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model _snake_case = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) _snake_case = {} for name, shape in init_vars: logger.info(f"""Loading TF weight {name} with shape {shape}""" ) _snake_case = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = array # Build TF to PyTorch weights loading map _snake_case = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(f"""Importing {name}""" ) if name not in tf_weights: logger.info(f"""{name} not in tf pre-trained weights, skipping""" ) continue _snake_case = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) _snake_case = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer _snake_case = array.squeeze().transpose() else: _snake_case = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" ) _snake_case = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + """/RMSProp""" , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + """/RMSProp_1""" , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + """/ExponentialMovingAverage""" , _SCREAMING_SNAKE_CASE ) logger.info(f"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case, _snake_case = features.shape[-2:] _snake_case, _snake_case = conv_layer.stride _snake_case, _snake_case = conv_layer.kernel_size if in_height % stride_height == 0: _snake_case = max(kernel_height - stride_height , 0 ) else: _snake_case = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: _snake_case = max(kernel_width - stride_width , 0 ) else: _snake_case = max(kernel_width - (in_width % stride_width) , 0 ) _snake_case = pad_along_width // 2 _snake_case = pad_along_width - pad_left _snake_case = pad_along_height // 2 _snake_case = pad_along_height - pad_top _snake_case = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """constant""" , 0.0 ) class _lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = True , ) -> None: super().__init__() _snake_case = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) _snake_case = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) _snake_case = nn.Convad( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , kernel_size=UpperCAmelCase , stride=UpperCAmelCase , padding=UpperCAmelCase , groups=UpperCAmelCase , bias=UpperCAmelCase , padding_mode="""zeros""" , ) if use_normalization: _snake_case = nn.BatchNormad( 
num_features=UpperCAmelCase , eps=config.layer_norm_eps , momentum=0.9997 , affine=UpperCAmelCase , track_running_stats=UpperCAmelCase , ) else: _snake_case = None if use_activation: if isinstance(UpperCAmelCase , UpperCAmelCase ): _snake_case = ACTaFN[use_activation] elif isinstance(config.hidden_act , UpperCAmelCase ): _snake_case = ACTaFN[config.hidden_act] else: _snake_case = config.hidden_act else: _snake_case = None def lowercase (self , UpperCAmelCase ) -> torch.Tensor: if self.config.tf_padding: _snake_case = apply_tf_padding(UpperCAmelCase , self.convolution ) _snake_case = self.convolution(UpperCAmelCase ) if self.normalization is not None: _snake_case = self.normalization(UpperCAmelCase ) if self.activation is not None: _snake_case = self.activation(UpperCAmelCase ) return features class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = MobileNetVaConfig lowerCAmelCase_ = load_tf_weights_in_mobilenet_va lowerCAmelCase_ = "mobilenet_v1" lowerCAmelCase_ = "pixel_values" lowerCAmelCase_ = False def lowercase (self , UpperCAmelCase ) -> None: if isinstance(UpperCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCAmelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowerCAmelCase = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowerCAmelCase = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, __snake_case , ) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase = True ) -> Dict: super().__init__(UpperCAmelCase ) _snake_case = config _snake_case = 32 _snake_case = max(int(depth * config.depth_multiplier ) , config.min_depth ) _snake_case = MobileNetVaConvLayer( UpperCAmelCase , in_channels=config.num_channels , out_channels=UpperCAmelCase , kernel_size=3 , stride=2 , ) _snake_case = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] _snake_case = nn.ModuleList() for i in range(13 ): _snake_case = out_channels if strides[i] == 2 or i == 0: depth *= 2 _snake_case = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase , ) ) self.layer.append( MobileNetVaConvLayer( UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , kernel_size=1 , ) ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowercase (self , UpperCAmelCase ) -> Dict: raise NotImplementedError @add_start_docstrings_to_model_forward(UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowercase (self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) _snake_case = self.conv_stem(UpperCAmelCase ) _snake_case = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): _snake_case = layer_module(UpperCAmelCase ) if output_hidden_states: _snake_case = all_hidden_states + (hidden_states,) _snake_case = hidden_states if self.pooler is not None: _snake_case = torch.flatten(self.pooler(UpperCAmelCase ) , start_dim=1 ) else: _snake_case = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=UpperCAmelCase , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , __snake_case , ) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase ) -> None: super().__init__(UpperCAmelCase ) _snake_case = config.num_labels _snake_case = MobileNetVaModel(UpperCAmelCase ) _snake_case = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head _snake_case = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase ) _snake_case = nn.Linear(UpperCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowercase (self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.mobilenet_va(UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(self.dropout(UpperCAmelCase ) ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = """single_label_classification""" else: _snake_case = """multi_label_classification""" if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(UpperCAmelCase , UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(UpperCAmelCase , UpperCAmelCase ) if not return_dict: _snake_case = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states , )
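One non-obvious piece of the port above is apply_tf_padding, which reproduces TensorFlow's asymmetric "SAME" padding. Below is a standalone sketch of the same arithmetic for one spatial dimension (the helper name and the 224/2/3 example are mine, not from the file).

def same_pad_1d(size: int, stride: int, kernel: int):
    # TF "SAME": total pad depends on whether size divides evenly by stride,
    # and any odd remainder goes on the right/bottom (unlike PyTorch's symmetric pad).
    if size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - (size % stride), 0)
    return pad // 2, pad - pad // 2

assert same_pad_1d(224, 2, 3) == (0, 1)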
585
1
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test (5 random witnesses)."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial-divide by all primes below 1000, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
        151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
        317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
        503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
        701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
        911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
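A quick usage sketch for the helpers above; the 16-bit keysize is deliberately tiny so the demo finishes instantly (real RSA keys use 1024+ bits).

assert is_prime_low_num(104729)      # the 10000th prime
assert not is_prime_low_num(104731)  # 104731 = 11 * 9521, caught by trial division
p = generate_large_prime(16)
assert 2**15 <= p < 2**16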
102
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowerCamelCase = data_utils.TransfoXLTokenizer lowerCamelCase = data_utils.TransfoXLCorpus lowerCamelCase = data_utils lowerCamelCase = data_utils def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(UpperCAmelCase__ , 'rb' ) as fp: __lowerCAmelCase = pickle.load(UpperCAmelCase__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) __lowerCAmelCase = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) __lowerCAmelCase = corpus.vocab.__dict__ torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) __lowerCAmelCase = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase__ ) __lowerCAmelCase = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model __lowerCAmelCase = os.path.abspath(UpperCAmelCase__ ) __lowerCAmelCase = os.path.abspath(UpperCAmelCase__ ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": __lowerCAmelCase = TransfoXLConfig() else: __lowerCAmelCase = TransfoXLConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) __lowerCAmelCase = TransfoXLLMHeadModel(UpperCAmelCase__ ) __lowerCAmelCase = load_tf_weights_in_transfo_xl(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model __lowerCAmelCase = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) __lowerCAmelCase = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) print(F"""Save PyTorch model to {os.path.abspath(UpperCAmelCase__ )}""" ) torch.save(model.state_dict() , UpperCAmelCase__ ) print(F"""Save configuration file to {os.path.abspath(UpperCAmelCase__ )}""" ) with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) lowerCamelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
102
1
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=lowerCAmelCase__ ) class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} ) SCREAMING_SNAKE_CASE = Features({'text': Value('string' )} ) SCREAMING_SNAKE_CASE = Features({} ) SCREAMING_SNAKE_CASE = "text" @property def _a (self ): """simple docstring""" return {self.text_column: "text"}
182
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a (self ): """simple docstring""" UpperCAmelCase__ : Dict = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_lowerCamelCase ).to(_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" ) UpperCAmelCase__ : Any = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids UpperCAmelCase__ : str = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids UpperCAmelCase__ : Tuple = model(input_ids.to(_lowerCamelCase ) , labels=labels.to(_lowerCamelCase ) ).loss UpperCAmelCase__ : Union[str, Any] = -(labels.shape[-1] * loss.item()) UpperCAmelCase__ : List[str] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
182
1
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( 'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion' ) UpperCamelCase = None UpperCamelCase = { '7B': 1_1008, '13B': 1_3824, '30B': 1_7920, '65B': 2_2016, '70B': 2_8672, } UpperCamelCase = { '7B': 1, '7Bf': 1, '13B': 2, '13Bf': 2, '30B': 4, '65B': 8, '70B': 8, '70Bf': 8, } def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : List[Any]=256 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _A ( lowerCAmelCase_ : Any ): """simple docstring""" with open(lowerCAmelCase_ , "r" ) as f: return json.load(lowerCAmelCase_ ) def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] ): """simple docstring""" with open(lowerCAmelCase_ , "w" ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) def _A ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]=True ): """simple docstring""" os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) lowerCAmelCase__ = os.path.join(lowerCAmelCase_ , "tmp" ) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) lowerCAmelCase__ = read_json(os.path.join(lowerCAmelCase_ , "params.json" ) ) lowerCAmelCase__ = NUM_SHARDS[model_size] lowerCAmelCase__ = params['''n_layers'''] lowerCAmelCase__ = params['''n_heads'''] lowerCAmelCase__ = n_heads // num_shards lowerCAmelCase__ = params['''dim'''] lowerCAmelCase__ = dim // n_heads lowerCAmelCase__ = 1_0000.0 lowerCAmelCase__ = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase_ , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: lowerCAmelCase__ = params['''n_kv_heads'''] # for GQA / MQA lowerCAmelCase__ = n_heads_per_shard // num_key_value_heads lowerCAmelCase__ = dim // num_key_value_heads else: # compatibility with other checkpoints lowerCAmelCase__ = n_heads lowerCAmelCase__ = n_heads_per_shard lowerCAmelCase__ = dim # permute for sliced rotary def permute(lowerCAmelCase_ : str , lowerCAmelCase_ : int=n_heads , lowerCAmelCase_ : str=dim , lowerCAmelCase_ : List[str]=dim ): return w.view(lowerCAmelCase_ , dima // n_heads // 2 , 2 , lowerCAmelCase_ ).transpose(1 , 2 ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) print(F'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
lowerCAmelCase__ = torch.load(os.path.join(lowerCAmelCase_ , "consolidated.00.pth" ) , map_location="cpu" ) else: # Sharded lowerCAmelCase__ = [ torch.load(os.path.join(lowerCAmelCase_ , F'consolidated.{i:02d}.pth' ) , map_location="cpu" ) for i in range(lowerCAmelCase_ ) ] lowerCAmelCase__ = 0 lowerCAmelCase__ = {'''weight_map''': {}} for layer_i in range(lowerCAmelCase_ ): lowerCAmelCase__ = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded lowerCAmelCase__ = { F'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[F'layers.{layer_i}.attention.wq.weight'] ), F'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[F'layers.{layer_i}.attention.wk.weight'] ), F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'], F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'], F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'], F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'], F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'], F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'], F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. lowerCAmelCase__ = { F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ F'layers.{layer_i}.attention_norm.weight' ].clone(), F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ F'layers.{layer_i}.ffn_norm.weight' ].clone(), } lowerCAmelCase__ = permute( torch.cat( [ loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ) ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) ) lowerCAmelCase__ = permute( torch.cat( [ loaded[i][F'layers.{layer_i}.attention.wk.weight'].view( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ) ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) lowerCAmelCase__ = torch.cat( [ loaded[i][F'layers.{layer_i}.attention.wv.weight'].view( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ) ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ = torch.cat( [loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(lowerCAmelCase_ )] , dim=1 ) lowerCAmelCase__ = torch.cat( [loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(lowerCAmelCase_ )] , dim=0 ) lowerCAmelCase__ = torch.cat( [loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(lowerCAmelCase_ )] , dim=1 ) lowerCAmelCase__ = torch.cat( [loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(lowerCAmelCase_ )] , dim=0 ) lowerCAmelCase__ = inv_freq for k, v in state_dict.items(): lowerCAmelCase__ = filename param_count += v.numel() torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) lowerCAmelCase__ = F'pytorch_model-{n_layers + 
1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded lowerCAmelCase__ = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: lowerCAmelCase__ = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(lowerCAmelCase_ )] , dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]["output.weight"] for i in range(lowerCAmelCase_ )] , dim=0 ), } for k, v in state_dict.items(): lowerCAmelCase__ = filename param_count += v.numel() torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) # Write configs lowerCAmelCase__ = {'''total_size''': param_count * 2} write_json(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "pytorch_model.bin.index.json" ) ) lowerCAmelCase__ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 lowerCAmelCase__ = params['''multiple_of'''] if '''multiple_of''' in params else 256 lowerCAmelCase__ = LlamaConfig( hidden_size=lowerCAmelCase_ , intermediate_size=compute_intermediate_size(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=lowerCAmelCase_ , ) config.save_pretrained(lowerCAmelCase_ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model." ) lowerCAmelCase__ = LlamaForCausalLM.from_pretrained(lowerCAmelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCAmelCase_ ) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format." ) model.save_pretrained(lowerCAmelCase_ , safe_serialization=lowerCAmelCase_ ) shutil.rmtree(lowerCAmelCase_ ) def _A ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) lowerCAmelCase__ = tokenizer_class(lowerCAmelCase_ ) tokenizer.save_pretrained(lowerCAmelCase_ ) def _A ( ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , ) parser.add_argument( "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , ) parser.add_argument( "--output_dir" , help="Location to write HF model and tokenizer" , ) parser.add_argument("--safe_serialization" , type=lowerCAmelCase_ , help="Whether or not to save using `safetensors`." ) lowerCAmelCase__ = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) lowerCAmelCase__ = os.path.join(args.input_dir , "tokenizer.model" ) write_tokenizer(args.output_dir , lowerCAmelCase_ ) if __name__ == "__main__": main()
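The FFN sizing rule in compute_intermediate_size explains where the 7B entry of the first table above comes from: with dim = 4096 and the default multiplier of 1, 8 * 4096 / 3 rounds up to the next multiple of 256, which is 11008. A standalone sketch of the arithmetic:

n, multiple_of = 4096, 256
raw = int(8 * n / 3)  # 10922
rounded = multiple_of * ((raw + multiple_of - 1) // multiple_of)
assert rounded == 11008  # matches the '7B' entry in the table above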
703
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
125
0
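The sharded-checkpoint stitching in the LLaMA converter above is hard to follow in flattened form, so here is a minimal, self-contained sketch of the core idea. The helper name, shapes, and tensors are illustrative only, not the converter's actual API; the dim-0 versus dim-1 split mirrors the converter's treatment of wq/wk/wv/w1/w3 versus wo/w2.

import torch

# Illustrative sketch: merging one linear layer from N tensor-parallel shards.
# Row-parallel weights (wq, wk, wv, w1, w3) are concatenated along dim 0;
# column-parallel weights (wo, w2) are concatenated along dim 1.
def merge_shards(shards, row_parallel):
    dim = 0 if row_parallel else 1
    return torch.cat(shards, dim=dim)

shards = [torch.randn(4, 8) for _ in range(2)]  # two hypothetical shards
merged_rows = merge_shards(shards, row_parallel=True)   # shape (8, 8)
merged_cols = merge_shards(shards, row_parallel=False)  # shape (4, 16)
print(merged_rows.shape, merged_cols.shape)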
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
631
def bfs(graph, source, sink, parent):
    """Return True if there is an augmenting path from source to sink."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
631
1
import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values snake_case_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") snake_case_ , snake_case_ : Tuple = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") snake_case_ : List[str] = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: snake_case_ : Tuple = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) snake_case_ : int = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"]) cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
169
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def A (__A : Tuple , __A : List[Any]=None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ = None if token is not None: UpperCAmelCase_ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} UpperCAmelCase_ = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" UpperCAmelCase_ = requests.get(__A , headers=__A ).json() UpperCAmelCase_ = {} try: job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) UpperCAmelCase_ = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(__A ): UpperCAmelCase_ = requests.get(url + F"""&page={i + 2}""" , headers=__A ).json() job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return job_links except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def A (__A : Union[str, Any] , __A : Optional[Any]=None ) -> Any: """simple docstring""" UpperCAmelCase_ = None if token is not None: UpperCAmelCase_ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} UpperCAmelCase_ = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" UpperCAmelCase_ = requests.get(__A , headers=__A ).json() UpperCAmelCase_ = {} try: artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) UpperCAmelCase_ = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(__A ): UpperCAmelCase_ = requests.get(url + F"""&page={i + 2}""" , headers=__A ).json() artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) return artifacts except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def A (__A : Any , __A : Tuple , __A : Optional[int] , __A : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ = None if token is not None: UpperCAmelCase_ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} UpperCAmelCase_ = requests.get(__A , headers=__A , allow_redirects=__A ) UpperCAmelCase_ = result.headers['''Location'''] UpperCAmelCase_ = requests.get(__A , allow_redirects=__A ) UpperCAmelCase_ = os.path.join(__A , F"""{artifact_name}.zip""" ) with open(__A , '''wb''' ) as fp: fp.write(response.content ) def A (__A : Union[str, Any] , __A : Optional[int]=None ) -> int: """simple docstring""" UpperCAmelCase_ = [] UpperCAmelCase_ = [] UpperCAmelCase_ = None with zipfile.ZipFile(__A ) as z: for filename in z.namelist(): if not os.path.isdir(__A ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__A ) as f: for line in f: UpperCAmelCase_ = line.decode('''UTF-8''' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs UpperCAmelCase_ = line[: line.index(''': ''' )] UpperCAmelCase_ = line[line.index(''': ''' ) + len(''': ''' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ): # `test` is the test method that failed UpperCAmelCase_ = line[len('''FAILED ''' ) :] failed_tests.append(__A ) elif 
filename == "job_name.txt": UpperCAmelCase_ = line if len(__A ) != len(__A ): raise ValueError( F"""`errors` and `failed_tests` should have the same number of elements. Got {len(__A )} for `errors` """ F"""and {len(__A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" ''' problem.''' ) UpperCAmelCase_ = None if job_name and job_links: UpperCAmelCase_ = job_links.get(__A , __A ) # A list with elements of the form (line of error, error, failed test) UpperCAmelCase_ = [x + [y] + [job_link] for x, y in zip(__A , __A )] return result def A (__A : List[str] , __A : Any=None ) -> int: """simple docstring""" UpperCAmelCase_ = [] UpperCAmelCase_ = [os.path.join(__A , __A ) for p in os.listdir(__A ) if p.endswith('''.zip''' )] for p in paths: errors.extend(get_errors_from_single_artifact(__A , job_links=__A ) ) return errors def A (__A : Tuple , __A : Dict=None ) -> Dict: """simple docstring""" UpperCAmelCase_ = Counter() counter.update([x[1] for x in logs] ) UpperCAmelCase_ = counter.most_common() UpperCAmelCase_ = {} for error, count in counts: if error_filter is None or error not in error_filter: UpperCAmelCase_ = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]} UpperCAmelCase_ = dict(sorted(r.items() , key=lambda __A : item[1]["count"] , reverse=__A ) ) return r def A (__A : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = test.split('''::''' )[0] if test.startswith('''tests/models/''' ): UpperCAmelCase_ = test.split('''/''' )[2] else: UpperCAmelCase_ = None return test def A (__A : str , __A : int=None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs] UpperCAmelCase_ = [x for x in logs if x[2] is not None] UpperCAmelCase_ = {x[2] for x in logs} UpperCAmelCase_ = {} for test in tests: UpperCAmelCase_ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) UpperCAmelCase_ = counter.most_common() UpperCAmelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} UpperCAmelCase_ = sum(error_counts.values() ) if n_errors > 0: UpperCAmelCase_ = {'''count''': n_errors, '''errors''': error_counts} UpperCAmelCase_ = dict(sorted(r.items() , key=lambda __A : item[1]["count"] , reverse=__A ) ) return r def A (__A : Union[str, Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = '''| no. | error | status |''' UpperCAmelCase_ = '''|-:|:-|:-|''' UpperCAmelCase_ = [header, sep] for error in reduced_by_error: UpperCAmelCase_ = reduced_by_error[error]['''count'''] UpperCAmelCase_ = F"""| {count} | {error[:100]} | |""" lines.append(__A ) return "\n".join(__A ) def A (__A : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = '''| model | no. 
of errors | major error | count |''' UpperCAmelCase_ = '''|-:|-:|-:|-:|''' UpperCAmelCase_ = [header, sep] for model in reduced_by_model: UpperCAmelCase_ = reduced_by_model[model]['''count'''] UpperCAmelCase_ , UpperCAmelCase_ = list(reduced_by_model[model]['''errors'''].items() )[0] UpperCAmelCase_ = F"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(__A ) return "\n".join(__A ) if __name__ == "__main__": snake_case_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") snake_case_ : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) snake_case_ : Dict = get_job_links(args.workflow_run_id, token=args.token) snake_case_ : Dict = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: snake_case_ : List[Any] = k.find(" / ") snake_case_ : List[str] = k[index + len(" / ") :] snake_case_ : Optional[int] = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) snake_case_ : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) snake_case_ : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error snake_case_ : str = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors snake_case_ : Dict = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) snake_case_ : str = reduce_by_error(errors) snake_case_ : Optional[Any] = reduce_by_model(errors) snake_case_ : int = make_github_table(reduced_by_error) snake_case_ : Optional[int] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
169
1
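The CI-log script above aggregates failures by counting identical error messages across jobs. A minimal sketch of that aggregation step follows; the `logs` entries are illustrative stand-ins for the (error_line, error, failed_test) triples the script builds, not real CI output.

from collections import Counter

# Illustrative sketch: count identical error messages across job logs
# and print the most common ones, as the reporting script does.
logs = [
    ("test_a.py:1", "AssertionError", "test_a"),
    ("test_b.py:9", "AssertionError", "test_b"),
    ("test_c.py:3", "ImportError", "test_c"),
]
counter = Counter(error for _, error, _ in logs)
for error, count in counter.most_common(30):
    print(f"{count:4d}  {error}")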
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : Optional[Any] = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowercase : Any = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowercase : Optional[Any] = '</w>' lowercase : str = '@@ ' def __a ( A__ ) -> int: lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char return pairs # Speech2Text2 has no max input length lowercase : Tuple = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4} class _lowerCAmelCase ( UpperCamelCase_ ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int="<s>" , SCREAMING_SNAKE_CASE : Dict="<pad>" , SCREAMING_SNAKE_CASE : List[str]="</s>" , SCREAMING_SNAKE_CASE : str="<unk>" , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : Any , ) -> Union[str, Any]: """simple docstring""" super().__init__( unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) lowerCAmelCase = do_lower_case with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as vocab_handle: lowerCAmelCase = json.load(SCREAMING_SNAKE_CASE ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." 
) lowerCAmelCase = None lowerCAmelCase = None else: with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle: lowerCAmelCase = merges_handle.read().split("\n" )[:-1] lowerCAmelCase = [tuple(merge.split()[:2] ) for merge in merges] lowerCAmelCase = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowerCAmelCase = {} @property def __A ( self : int ) -> int: """simple docstring""" return len(self.decoder ) def __A ( self : Optional[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : str , SCREAMING_SNAKE_CASE : Dict ) -> Tuple: """simple docstring""" lowerCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] lowerCAmelCase = get_pairs(SCREAMING_SNAKE_CASE ) if not pairs: return token while True: lowerCAmelCase = min(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : self.bpe_ranks.get(SCREAMING_SNAKE_CASE , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 while i < len(SCREAMING_SNAKE_CASE ): try: lowerCAmelCase = word.index(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(SCREAMING_SNAKE_CASE ) lowerCAmelCase = new_word if len(SCREAMING_SNAKE_CASE ) == 1: break else: lowerCAmelCase = get_pairs(SCREAMING_SNAKE_CASE ) lowerCAmelCase = " ".join(SCREAMING_SNAKE_CASE ) if word == "\n " + BPE_TOKEN_MERGES: lowerCAmelCase = "\n" + BPE_TOKEN_MERGES if word.endswith(SCREAMING_SNAKE_CASE ): lowerCAmelCase = word.replace(SCREAMING_SNAKE_CASE , "" ) lowerCAmelCase = word.replace(" " , SCREAMING_SNAKE_CASE ) lowerCAmelCase = word return word def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: """simple docstring""" if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." 
) if self.do_lower_case: lowerCAmelCase = text.lower() lowerCAmelCase = text.split() lowerCAmelCase = [] for token in text: if token: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE ).split(" " ) ) ) return split_tokens def __A ( self : Any , SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def __A ( self : Any , SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" lowerCAmelCase = self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token ) return result def __A ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" lowerCAmelCase = " ".join(SCREAMING_SNAKE_CASE ) # make sure @@ tokens are concatenated lowerCAmelCase = "".join(string.split(SCREAMING_SNAKE_CASE ) ) return string def __A ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE , ensure_ascii=SCREAMING_SNAKE_CASE ) + "\n" ) lowerCAmelCase = 0 if self.bpe_ranks is None: return (vocab_file,) with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) lowerCAmelCase = token_index writer.write(" ".join(SCREAMING_SNAKE_CASE ) + "\n" ) index += 1 return (vocab_file, merges_file)
649
'''simple docstring''' from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase : str = logging.get_logger(__name__) lowercase : Optional[Any] = { 'nielsr/canine-s': 2_0_4_8, } # Unicode defines 1,114,112 total “codepoints” lowercase : Dict = 1_1_1_4_1_1_2 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py lowercase : List[str] = 0 lowercase : List[str] = 0Xe_000 lowercase : Optional[int] = 0Xe_001 lowercase : Union[str, Any] = 0Xe_002 lowercase : List[str] = 0Xe_003 lowercase : str = 0Xe_004 # Maps special codepoints to human-readable names. lowercase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. lowercase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class _lowerCAmelCase ( UpperCamelCase_ ): """simple docstring""" lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , SCREAMING_SNAKE_CASE : Optional[int]=chr(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE : int=chr(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE : Tuple=chr(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE : Tuple=chr(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE : Union[str, Any]=chr(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE : int=chr(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : List[Any]=2_0_4_8 , **SCREAMING_SNAKE_CASE : Optional[Any] , ) -> List[Any]: """simple docstring""" lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else bos_token lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else eos_token lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else sep_token lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cls_token lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , model_max_length=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # Creates a mapping for looking up the IDs of special symbols. lowerCAmelCase = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. lowerCAmelCase = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase = UNICODE_VOCAB_SIZE lowerCAmelCase = len(self._special_codepoints ) @property def __A ( self : List[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def __A ( self : str , SCREAMING_SNAKE_CASE : str ) -> List[str]: """simple docstring""" return list(SCREAMING_SNAKE_CASE ) def __A ( self : Dict , SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" try: return ord(SCREAMING_SNAKE_CASE ) except TypeError: raise ValueError(f"invalid token: '{token}'" ) def __A ( self : str , SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(SCREAMING_SNAKE_CASE ) except TypeError: raise ValueError(f"invalid id: {index}" ) def __A ( self : Any , SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" return "".join(SCREAMING_SNAKE_CASE ) def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def __A ( self : List[str] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) lowerCAmelCase = [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] if token_ids_a is not None: result += ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return result def __A ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def __A ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> str: """simple docstring""" return ()
649
1
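Both tokenizers above lean on a pair-collection helper for BPE merging. Here is a minimal, runnable sketch of that helper (it mirrors the `get_pairs` function defined in the Speech2Text2 tokenizer sample; the example word is illustrative): collect every adjacent symbol pair in a word, which the merge loop then ranks against the learned merges.

def get_pairs(word):
    # word is a tuple of symbols, e.g. ("l", "o", "w", "e", "r")
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(("l", "o", "w", "e", "r")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}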
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]: assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def __lowerCAmelCase ( ) -> List[Any]: assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def __lowerCAmelCase ( ) -> Optional[Any]: lowerCAmelCase__ : str = '''mock-s3-bucket''' lowerCAmelCase__ : int = F"""s3://{mock_bucket}""" lowerCAmelCase__ : Dict = extract_path_from_uri(UpperCamelCase ) assert dataset_path.startswith('''s3://''' ) is False lowerCAmelCase__ : str = '''./local/path''' lowerCAmelCase__ : str = extract_path_from_uri(UpperCamelCase ) assert dataset_path == new_dataset_path def __lowerCAmelCase ( UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : Any = is_remote_filesystem(UpperCamelCase ) assert is_remote is True lowerCAmelCase__ : List[Any] = fsspec.filesystem('''file''' ) lowerCAmelCase__ : Union[str, Any] = is_remote_filesystem(UpperCamelCase ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Optional[int] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowerCAmelCase__ : Any = input_paths[compression_fs_class.protocol] if input_path is None: lowerCAmelCase__ : int = F"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase ) lowerCAmelCase__ : Tuple = fsspec.filesystem(compression_fs_class.protocol , fo=UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = os.path.basename(UpperCamelCase ) lowerCAmelCase__ : Any = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(UpperCamelCase , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : str = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowerCAmelCase__ : List[str] = compressed_file_paths[protocol] lowerCAmelCase__ : Tuple = '''dataset.jsonl''' lowerCAmelCase__ : Dict = F"""{protocol}://{member_file_path}::{compressed_file_path}""" lowerCAmelCase__ : Tuple = fsspec.get_fs_token_paths(UpperCamelCase ) assert fs.isfile(UpperCamelCase ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCAmelCase__ : Optional[int] = hf_api.dataset_info(UpperCamelCase , token=UpperCamelCase ) lowerCAmelCase__ : List[Any] = HfFileSystem(repo_info=UpperCamelCase , token=UpperCamelCase ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) 
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(UpperCamelCase ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def __lowerCAmelCase ( ) -> List[str]: lowerCAmelCase__ : List[str] = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(UpperCamelCase , UpperCamelCase , clobber=UpperCamelCase ) with pytest.warns(UpperCamelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(UpperCamelCase ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
711
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
470
0
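The filesystem tests above exercise reading compressed files transparently through fsspec. A small sketch of the same round trip follows, using fsspec's standard `compression` argument rather than the datasets-specific compression filesystems; the file path is illustrative.

import gzip

import fsspec

# Illustrative sketch: write a gzip file, then read it back transparently
# through fsspec's compression support.
with gzip.open("/tmp/file.txt.gz", "wb") as f:
    f.write(b"Text data.\nSecond line of data.")

with fsspec.open("/tmp/file.txt.gz", "rt", compression="gzip") as f:
    print(f.read())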
import math import random from typing import Any from .hill_climbing import SearchProblem def __lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] = True , UpperCAmelCase__ : Tuple = math.inf , UpperCAmelCase__ : Union[str, Any] = -math.inf , UpperCAmelCase__ : List[str] = math.inf , UpperCAmelCase__ : Optional[Any] = -math.inf , UpperCAmelCase__ : List[Any] = False , UpperCAmelCase__ : Tuple = 1_0_0 , UpperCAmelCase__ : Union[str, Any] = 0.0_1 , UpperCAmelCase__ : List[str] = 1 , ) -> Any: lowerCamelCase_ = False lowerCamelCase_ = search_prob lowerCamelCase_ = start_temperate lowerCamelCase_ = [] lowerCamelCase_ = 0 lowerCamelCase_ = None while not search_end: lowerCamelCase_ = current_state.score() if best_state is None or current_score > best_state.score(): lowerCamelCase_ = current_state scores.append(snake_case__ ) iterations += 1 lowerCamelCase_ = None lowerCamelCase_ = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to lowerCamelCase_ = random.randint(0 , len(snake_case__ ) - 1 ) # picking a random neighbor lowerCamelCase_ = neighbors.pop(snake_case__ ) lowerCamelCase_ = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: lowerCamelCase_ = change * -1 # in case we are finding minimum if change > 0: # improves the solution lowerCamelCase_ = picked_neighbor else: lowerCamelCase_ = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability lowerCamelCase_ = picked_neighbor lowerCamelCase_ = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor lowerCamelCase_ = True else: lowerCamelCase_ = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(snake_case__ ) , snake_case__ ) plt.xlabel("""Iterations""" ) plt.ylabel("""Function values""" ) plt.show() return best_state if __name__ == "__main__": def __lowerCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> List[Any]: return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) lowercase = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa) lowercase = simulated_annealing( prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True ) print( '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) lowercase = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa) lowercase = simulated_annealing( prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True ) print( '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) def __lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ) -> Tuple: return (3 * x**2) - (6 * y) lowercase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase = simulated_annealing(prob, find_max=False, visualization=True) print( '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' 
F"""{local_min.score()}""" ) lowercase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase = simulated_annealing(prob, find_max=True, visualization=True) print( '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F"""{local_min.score()}""" )
272
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __snake_case ( SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ = 42 class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self ,a_ = 16 ,a_ = 88 ,a_ = None ,a_ = None ,a_ = 1 ,a_ = 0.0 ,a_ = 32 ,a_ = None ,a_ = False ,a_ = None ,a_ = "geglu" ,a_ = True ,a_ = True ,): """simple docstring""" super().__init__() lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = attention_head_dim lowerCAmelCase__ = num_attention_heads * attention_head_dim lowerCAmelCase__ = in_channels lowerCAmelCase__ = torch.nn.GroupNorm(num_groups=a_ ,num_channels=a_ ,eps=1e-6 ,affine=a_ ) lowerCAmelCase__ = nn.Linear(a_ ,a_ ) # 3. Define transformers blocks lowerCAmelCase__ = nn.ModuleList( [ BasicTransformerBlock( a_ ,a_ ,a_ ,dropout=a_ ,cross_attention_dim=a_ ,activation_fn=a_ ,attention_bias=a_ ,double_self_attention=a_ ,norm_elementwise_affine=a_ ,) for d in range(a_ ) ] ) lowerCAmelCase__ = nn.Linear(a_ ,a_ ) def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,a_=None ,a_=None ,a_=1 ,a_=None ,a_ = True ,): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape lowerCAmelCase__ = batch_frames // num_frames lowerCAmelCase__ = hidden_states lowerCAmelCase__ = hidden_states[None, :].reshape(a_ ,a_ ,a_ ,a_ ,a_ ) lowerCAmelCase__ = hidden_states.permute(0 ,2 ,1 ,3 ,4 ) lowerCAmelCase__ = self.norm(a_ ) lowerCAmelCase__ = hidden_states.permute(0 ,3 ,4 ,2 ,1 ).reshape(batch_size * height * width ,a_ ,a_ ) lowerCAmelCase__ = self.proj_in(a_ ) # 2. Blocks for block in self.transformer_blocks: lowerCAmelCase__ = block( a_ ,encoder_hidden_states=a_ ,timestep=a_ ,cross_attention_kwargs=a_ ,class_labels=a_ ,) # 3. Output lowerCAmelCase__ = self.proj_out(a_ ) lowerCAmelCase__ = ( hidden_states[None, None, :] .reshape(a_ ,a_ ,a_ ,a_ ,a_ ) .permute(0 ,3 ,4 ,1 ,2 ) .contiguous() ) lowerCAmelCase__ = hidden_states.reshape(a_ ,a_ ,a_ ,a_ ) lowerCAmelCase__ = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=a_ )
193
0
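The key step in the simulated-annealing search above is its acceptance rule. A minimal sketch of just that rule follows (the function name and sample temperatures are illustrative): improvements are always accepted, and a worse neighbor is accepted with probability e^(change / T), which shrinks as the temperature cools.

import math
import random

# Illustrative sketch of the acceptance rule: change <= 0 means a worse
# neighbor, which is accepted with probability e^(change / temperature).
def accept(change, temperature):
    if change > 0:  # improvement, always accept
        return True
    return random.random() < math.e ** (change / temperature)

print(accept(-1.0, temperature=10.0), accept(-1.0, temperature=0.1))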
import math def _lowerCAmelCase ( _a : list , _a : int = 0 , _a : int = 0 ) -> list: lowerCAmelCase_ : List[str] = end or len(_a ) for i in range(_a , _a ): lowerCAmelCase_ : List[Any] = i lowerCAmelCase_ : Tuple = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: lowerCAmelCase_ : Tuple = array[temp_index - 1] temp_index -= 1 lowerCAmelCase_ : Optional[Any] = temp_index_value return array def _lowerCAmelCase ( _a : list , _a : int , _a : int ) -> None: # Max Heap lowerCAmelCase_ : List[str] = index lowerCAmelCase_ : Tuple = 2 * index + 1 # Left Node lowerCAmelCase_ : Any = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: lowerCAmelCase_ : Optional[int] = left_index if right_index < heap_size and array[largest] < array[right_index]: lowerCAmelCase_ : Union[str, Any] = right_index if largest != index: lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = array[largest], array[index] heapify(_a , _a , _a ) def _lowerCAmelCase ( _a : list ) -> list: lowerCAmelCase_ : List[Any] = len(_a ) for i in range(n // 2 , -1 , -1 ): heapify(_a , _a , _a ) for i in range(n - 1 , 0 , -1 ): lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = array[0], array[i] heapify(_a , 0 , _a ) return array def _lowerCAmelCase ( _a : list , _a : int , _a : int , _a : int ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _lowerCAmelCase ( _a : list , _a : int , _a : int , _a : int ) -> int: lowerCAmelCase_ : Union[str, Any] = low lowerCAmelCase_ : List[Any] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i lowerCAmelCase_ , lowerCAmelCase_ : Any = array[j], array[i] i += 1 def _lowerCAmelCase ( _a : list ) -> list: if len(_a ) == 0: return array lowerCAmelCase_ : int = 2 * math.ceil(math.loga(len(_a ) ) ) lowerCAmelCase_ : Tuple = 16 return intro_sort(_a , 0 , len(_a ) , _a , _a ) def _lowerCAmelCase ( _a : list , _a : int , _a : int , _a : int , _a : int ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(_a ) max_depth -= 1 lowerCAmelCase_ : Tuple = median_of_a(_a , _a , start + ((end - start) // 2) + 1 , end - 1 ) lowerCAmelCase_ : Optional[int] = partition(_a , _a , _a , _a ) intro_sort(_a , _a , _a , _a , _a ) lowerCAmelCase_ : int = p return insertion_sort(_a , _a , _a ) if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : List[Any] = input("""Enter numbers separated by a comma : """).strip() UpperCAmelCase_ : Optional[int] = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
440
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowercase__ ( unittest.TestCase ): __UpperCamelCase = inspect.getfile(accelerate.test_utils ) __UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] ) __UpperCamelCase = ["""accelerate""", """launch"""] __UpperCamelCase = Path.home() / """.cache/huggingface/accelerate""" __UpperCamelCase = """default_config.yaml""" __UpperCamelCase = config_folder / config_file __UpperCamelCase = config_folder / """_default_config.yaml""" __UpperCamelCase = Path("""tests/test_configs""" ) @classmethod def UpperCAmelCase__ ( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def UpperCAmelCase__ ( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : int = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def UpperCAmelCase__ ( self ): for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ): with self.subTest(config_file=_lowercase ): execute_subprocess_async( self.base_cmd + ["""--config_file""", str(_lowercase ), self.test_file_path] , env=os.environ.copy() ) def UpperCAmelCase__ ( self ): execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() ) class lowercase__ ( unittest.TestCase ): __UpperCamelCase = """test-tpu""" __UpperCamelCase = """us-central1-a""" __UpperCamelCase = """ls""" __UpperCamelCase = ["""accelerate""", """tpu-config"""] __UpperCamelCase = """cd /usr/share""" __UpperCamelCase = """tests/test_samples/test_command_file.sh""" __UpperCamelCase = """Running gcloud compute tpus tpu-vm ssh""" def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Optional[Any] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=_lowercase ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Tuple = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + [ """--config_file""", 
"""tests/test_configs/latest.yaml""", """--command""", self.command, """--command""", """echo \"Hello World\"""", """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Optional[Any] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command_file""", self.command_file, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Tuple = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Any = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--accelerate_version""", """12.0.0""", """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
440
1
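The introsort sample two records above combines quicksort, heapsort, and insertion sort behind a depth limit. Here is a deliberately simplified, non-in-place sketch of that control strategy, so the dispatch logic is visible without the full partition machinery; `sorted` stands in for the insertion-sort and heapsort fallbacks, and the names are illustrative.

import math

# Illustrative sketch of introsort's strategy: quicksort by default,
# fall back when recursion gets too deep, finish small slices directly.
def intro_sort(array, max_depth, size_threshold=16):
    if len(array) <= size_threshold:
        return sorted(array)  # stand-in for insertion sort
    if max_depth == 0:
        return sorted(array)  # stand-in for heap sort
    pivot = array[len(array) // 2]
    smaller = [x for x in array if x < pivot]
    equal = [x for x in array if x == pivot]
    larger = [x for x in array if x > pivot]
    return (
        intro_sort(smaller, max_depth - 1, size_threshold)
        + equal
        + intro_sort(larger, max_depth - 1, size_threshold)
    )

data = [93, 4, 57, 4, -1, 12]
print(intro_sort(data, max_depth=2 * math.ceil(math.log2(len(data)))))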
"""simple docstring""" def __snake_case ( _lowercase ): """simple docstring""" if n == 1 or not isinstance(_lowercase ,_lowercase ): return 0 elif n == 2: return 1 else: UpperCamelCase = [0, 1] for i in range(2 ,n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def __snake_case ( _lowercase ): """simple docstring""" UpperCamelCase = 0 UpperCamelCase = 2 while digits < n: index += 1 UpperCamelCase = len(str(fibonacci(_lowercase ) ) ) return index def __snake_case ( _lowercase = 1000 ): """simple docstring""" return fibonacci_digits_index(_lowercase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
34
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'} SCREAMING_SNAKE_CASE_ = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } SCREAMING_SNAKE_CASE_ = { 'openbmb/cpm-ant-10b': 1024, } def __snake_case ( _lowercase ): """simple docstring""" UpperCamelCase = collections.OrderedDict() with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader: UpperCamelCase = reader.readlines() for index, token in enumerate(_lowercase ): UpperCamelCase = token.rstrip('''\n''' ) UpperCamelCase = index return vocab class snake_case_ ( lowerCamelCase_ ): """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any: UpperCamelCase = vocab UpperCamelCase = unk_token UpperCamelCase = max_input_chars_per_word def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]: UpperCamelCase = list(lowerCamelCase_) if len(lowerCamelCase_) > self.max_input_chars_per_word: return [self.unk_token] UpperCamelCase = 0 UpperCamelCase = [] while start < len(lowerCamelCase_): UpperCamelCase = len(lowerCamelCase_) UpperCamelCase = None while start < end: UpperCamelCase = ''''''.join(chars[start:end]) if substr in self.vocab: UpperCamelCase = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(lowerCamelCase_) UpperCamelCase = end return sub_tokens class snake_case_ ( lowerCamelCase_ ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ['''input_ids''', '''attention_mask'''] A_ = False def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]: requires_backends(self , ['''jieba''']) super().__init__( bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , ) UpperCamelCase = bod_token UpperCamelCase = eod_token UpperCamelCase = load_vocab(lowerCamelCase_) UpperCamelCase = self.encoder[space_token] UpperCamelCase = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1])) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token) @property def UpperCAmelCase__ ( self) -> Dict: return self.encoder[self.bod_token] @property def UpperCAmelCase__ ( self) -> str: return self.encoder[self.eod_token] @property def UpperCAmelCase__ ( self) -> List[Any]: return self.encoder["\n"] @property def UpperCAmelCase__ ( self) -> int: return len(self.encoder) def UpperCAmelCase__ ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def UpperCAmelCase__ ( self , 
lowerCamelCase_) -> Any: UpperCamelCase = [] for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_)) return output_tokens def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple: UpperCamelCase = [i for i in token_ids if i >= 0] UpperCamelCase = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowerCamelCase_ , **lowerCamelCase_) def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict: return token in self.encoder def UpperCAmelCase__ ( self , lowerCamelCase_) -> str: return "".join(lowerCamelCase_) def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]: return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token)) def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict: return self.decoder.get(lowerCamelCase_ , self.unk_token) def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]: if os.path.isdir(lowerCamelCase_): UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) else: UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory UpperCamelCase = 0 if " " in self.encoder: UpperCamelCase = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: UpperCamelCase = self.encoder['''\n'''] del self.encoder["\n"] UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1])) with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ''' Please check that the vocabulary is not corrupted!''') UpperCamelCase = token_index writer.write(token + '''\n''') index += 1 return (vocab_file,) def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_) if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) return [1] + ([0] * len(lowerCamelCase_))
34
1
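The CPM-Ant tokenizer above uses a greedy longest-match loop inside its `WordpieceTokenizer`. A minimal, runnable sketch of that loop follows; the function name and the tiny vocabulary are illustrative, not the tokenizer's real vocabulary or API.

# Illustrative sketch of greedy longest-match tokenization: repeatedly take
# the longest prefix of the remaining characters that is in the vocabulary,
# emitting an unknown token when no prefix matches.
def wordpiece(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        cur = None
        while start < end:
            piece = text[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            tokens.append(unk)
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens

print(wordpiece("unhappy", {"un", "happy", "h"}))  # ['un', 'happy']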
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
470
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def unsupported_file_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
470
1
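A minimal sketch of the session-scoped fixture pattern the conftest above repeats for every file type; tmp_path_factory is pytest's built-in factory, and the fixture and test names here are illustrative:

import pytest


@pytest.fixture(scope="session")
def sample_text_file(tmp_path_factory):
    # mktemp returns a pathlib.Path under pytest's base temp directory.
    path = tmp_path_factory.mktemp("data") / "file.txt"
    path.write_text("Text data.\nSecond line of data.")
    return str(path)


def test_sample_text_file(sample_text_file):
    with open(sample_text_file) as f:
        assert f.readline().rstrip() == "Text data."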
import math
import sys


def read_file_binary(file_path: str) -> str:
    # Read a file as a string of "0"/"1" characters, one 8-bit group per byte.
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    # Decompress an LZW-encoded bit string.
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # Once the lexicon size reaches a power of two, every code grows by one bit.
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    # Write a bit string back out as bytes, padding the final byte.
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    # Strip the length prefix: leading zeros up to and including the first "1".
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
678
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
678
1
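A quick worked check of the byte-to-bit-string step in read_file_binary above: each byte expands to its zero-padded 8-bit form.

data = bytes([1, 255])
bits = "".join(f"{b:08b}" for b in data)
assert bits == "0000000111111111"  # "00000001" + "11111111"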
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        # Only the first batch may create the table; all later batches must append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
712
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@InProceedings{moosavi2019minimum,
    author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
    title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
    year = {2019},
    booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    publisher = {Association for Computational Linguistics},
    address = {Florence, Italy},
}

@inproceedings{10.3115/1072399.1072405,
    author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
    title = {A Model-Theoretic Coreference Scoring Scheme},
    year = {1995},
    isbn = {1558604022},
    publisher = {Association for Computational Linguistics},
    address = {USA},
    url = {https://doi.org/10.3115/1072399.1072405},
    doi = {10.3115/1072399.1072405},
    booktitle = {Proceedings of the 6th Conference on Message Understanding},
    pages = {45–52},
    numpages = {8},
    location = {Columbia, Maryland},
    series = {MUC6 ’95}
}

@INPROCEEDINGS{Bagga98algorithmsfor,
    author = {Amit Bagga and Breck Baldwin},
    title = {Algorithms for Scoring Coreference Chains},
    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
    year = {1998},
    pages = {563--566}
}

@INPROCEEDINGS{Luo05oncoreference,
    author = {Xiaoqiang Luo},
    title = {On coreference resolution performance metrics},
    booktitle = {In Proc. of HLT/EMNLP},
    year = {2005},
    pages = {25--32},
    publisher = {URL}
}

@inproceedings{moosavi-strube-2016-coreference,
    title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
    author = "Moosavi, Nafise Sadat and Strube, Michael",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P16-1060",
    doi = "10.18653/v1/P16-1060",
    pages = "632--642",
}
"""

_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].

This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column  Type                   Description
1       Document ID            This is a variation on the document filename
2       Part number            Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3       Word number
4       Word itself            This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5       Part-of-Speech
6       Parse bit              This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7       Predicate lemma        The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8       Predicate Frameset ID  This is the PropBank frameset ID of the predicate in Column 7.
9       Word sense             This is the word sense of the word in Column 3.
10      Speaker/Author         This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11      Named Entities         These columns identifies the spans representing various named entities.
12:N    Predicate Arguments    There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N       Coreference            Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html

Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md

CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""

_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding
        coreference chain is of size one, are considered as singletons. The default evaluation mode will
        include singletons in evaluations if they are included in the key or the system files.
        By setting 'keep_singletons=False', all singletons in the key and system files will be
        excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution
        of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting 'min_span', the scorer reports the results based on automatically detected
        minimum spans. Minimum spans are determined using the MINA algorithm.

Returns:
    'mentions': mentions
    'muc': MUC metric [Vilain et al, 1995]
    'bcub': B-cubed [Bagga and Baldwin, 1998]
    'ceafe': CEAFe [Luo et al., 2005]
    'lea': LEA [Moosavi and Strube, 2016]
    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)

Examples:

    >>> coval = datasets.load_metric('coval')
    >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
    ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
    ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
    ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
    ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
    ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""


def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
313
0
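A minimal sketch of the batched DataFrame.to_sql append pattern used by the SQL writer above, run against an in-memory SQLite database; the table and column names here are illustrative:

import sqlite3

import pandas as pd

con = sqlite3.connect(":memory:")
rows = [{"col_1": str(i), "col_2": i, "col_3": float(i)} for i in range(10)]
batch_size = 4
for offset in range(0, len(rows), batch_size):
    df = pd.DataFrame(rows[offset : offset + batch_size])
    # The first batch creates the table; later batches must append.
    df.to_sql("dataset", con, index=False, if_exists="append" if offset > 0 else "fail")
assert con.execute("SELECT COUNT(*) FROM dataset").fetchone()[0] == 10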
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
13
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
13
1
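A brute-force cross-check of the prize-strings recurrence above, assuming _calculate from that row is in scope; a string over O/L/A earns a prize iff it has fewer than two 'A's and no 'LLL' run:

from itertools import product


def brute_force(days: int) -> int:
    return sum(
        1
        for chars in product("OLA", repeat=days)
        if "".join(chars).count("A") < 2 and "LLL" not in "".join(chars)
    )


assert brute_force(4) == _calculate(4, absent=0, late=0)  # both evaluate to 43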
"""simple docstring""" import inspect import unittest class __UpperCAmelCase ( unittest.TestCase ): def UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' try: import diffusers # noqa: F401 except ImportError: assert False def UpperCAmelCase ( self : List[Any] ) -> Any: '''simple docstring''' import diffusers from diffusers.dependency_versions_table import deps a__ : Any = inspect.getmembers(a_ , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": a__ : str = "k-diffusion" elif backend == "invisible_watermark": a__ : List[Any] = "invisible-watermark" assert backend in deps, F"{backend} is not in the deps table!"
251
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def lowercase__ ( lowerCAmelCase__ : Union[str, Any] ) -> str: '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def lowercase__ ( lowerCAmelCase__ : List[str] ) -> str: '''simple docstring''' a__ : Any = create_tensor(lowerCAmelCase__ ) a__ : Optional[Any] = gather(lowerCAmelCase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def lowercase__ ( lowerCAmelCase__ : Dict ) -> Union[str, Any]: '''simple docstring''' a__ : str = [state.process_index] a__ : Optional[int] = gather_object(lowerCAmelCase__ ) assert len(lowerCAmelCase__ ) == state.num_processes, F"{gathered_obj}, {len(lowerCAmelCase__ )} != {state.num_processes}" assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}" def lowercase__ ( lowerCAmelCase__ : Optional[Any] ) -> Tuple: '''simple docstring''' a__ : str = create_tensor(lowerCAmelCase__ ) a__ : Any = broadcast(lowerCAmelCase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def lowercase__ ( lowerCAmelCase__ : Dict ) -> Union[str, Any]: '''simple docstring''' # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: a__ : Any = torch.arange(state.num_processes + 1 ).to(state.device ) else: a__ : Union[str, Any] = torch.arange(state.num_processes ).to(state.device ) a__ : List[Any] = pad_across_processes(lowerCAmelCase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def lowercase__ ( lowerCAmelCase__ : Dict ) -> str: '''simple docstring''' # For now runs on only two processes if state.num_processes != 2: return a__ : List[str] = create_tensor(lowerCAmelCase__ ) a__ : Union[str, Any] = reduce(lowerCAmelCase__ , "sum" ) a__ : List[str] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), F"{reduced_tensor} != {truth_tensor}" def lowercase__ ( lowerCAmelCase__ : List[str] ) -> int: '''simple docstring''' # For now runs on only two processes if state.num_processes != 2: return a__ : Tuple = create_tensor(lowerCAmelCase__ ) a__ : Dict = reduce(lowerCAmelCase__ , "mean" ) a__ : Tuple = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), F"{reduced_tensor} != {truth_tensor}" def lowercase__ ( lowerCAmelCase__ : str ) -> Union[str, Any]: '''simple docstring''' # For xla_spawn (TPUs) main() def lowercase__ ( ) -> Optional[int]: 
'''simple docstring''' a__ : List[str] = PartialState() state.print(F"State: {state}" ) state.print("testing gather" ) test_gather(lowerCAmelCase__ ) state.print("testing gather_object" ) test_gather_object(lowerCAmelCase__ ) state.print("testing broadcast" ) test_broadcast(lowerCAmelCase__ ) state.print("testing pad_across_processes" ) test_pad_across_processes(lowerCAmelCase__ ) state.print("testing reduce_sum" ) test_reduce_sum(lowerCAmelCase__ ) state.print("testing reduce_mean" ) test_reduce_mean(lowerCAmelCase__ ) if __name__ == "__main__": main()
251
1
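A tiny illustration of the inspect.getmembers(module, inspect.isclass) pattern used by the dependency test above, run against a stdlib module so it needs no extra installs:

import inspect
import json

# Collect every class exposed by the module, keyed by name.
classes = dict(inspect.getmembers(json, inspect.isclass))
assert "JSONDecoder" in classes and "JSONEncoder" in classes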
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
21
0
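A small self-contained sketch (illustrative names and numbers, not from the rows above) of the greedy pattern the two snippets share: rank candidates by a key, here value density, and take the best that still fits the budget.

# (name, value, weight) triples; densities are 6.0, 5.0, 4.0 respectively.
items = [("a", 60, 10), ("b", 100, 20), ("c", 120, 30)]
budget = 50

chosen, total_value, total_weight = [], 0, 0
for name, value, weight in sorted(items, key=lambda it: it[1] / it[2], reverse=True):
    if total_weight + weight <= budget:  # same feasibility test as greedy() above
        chosen.append(name)
        total_weight += weight
        total_value += value

assert (chosen, total_value) == (["a", "b"], 160)  # "c" no longer fits: 10 + 20 + 30 > 50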
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase__ ( A__ ): """simple docstring""" a = ["pixel_values"] def __init__( self : List[str] , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __lowerCamelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__lowerCamelCase : List[Any] , ) -> None: super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 224} SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , param_name='''crop_size''' ) SCREAMING_SNAKE_CASE__ = do_resize SCREAMING_SNAKE_CASE__ = size SCREAMING_SNAKE_CASE__ = resample SCREAMING_SNAKE_CASE__ = do_center_crop SCREAMING_SNAKE_CASE__ = crop_size SCREAMING_SNAKE_CASE__ = do_rescale SCREAMING_SNAKE_CASE__ = rescale_factor SCREAMING_SNAKE_CASE__ = do_normalize SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] , ) -> np.ndarray: SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: SCREAMING_SNAKE_CASE__ = int((256 / 224) * size['''shortest_edge'''] ) SCREAMING_SNAKE_CASE__ = get_resize_output_image_size(__lowerCamelCase , size=__lowerCamelCase , default_to_square=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. 
Got {size_dict.keys()}''' ) return resize( __lowerCamelCase , size=(size_dict['''height'''], size_dict['''width''']) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowercase_ ( self : Optional[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[int] , ) -> np.ndarray: SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return center_crop(__lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowercase_ ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ) -> np.ndarray: return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowercase_ ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : int , ) -> np.ndarray: return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowercase_ ( self : List[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Dict[str, int]] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Dict[str, int]] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[float, Iterable[float]]] = None , __lowerCamelCase : Optional[Union[float, Iterable[float]]] = None , __lowerCamelCase : Optional[TensorType] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : Any , ) -> BatchFeature: SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE__ = size if size is not None else self.size SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , param_name='''crop_size''' ) SCREAMING_SNAKE_CASE__ = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: SCREAMING_SNAKE_CASE__ = [self.resize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE__ = [self.center_crop(__lowerCamelCase , __lowerCamelCase ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ = [self.rescale(__lowerCamelCase , __lowerCamelCase ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE__ = [self.normalize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for image in images] SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] SCREAMING_SNAKE_CASE__ = {'''pixel_values''': images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
719
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE : Dict = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Tuple = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[Any] = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[Any] = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys _SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
472
0
"""simple docstring""" SCREAMING_SNAKE_CASE_ = 9.80665 def __snake_case ( _lowercase ,_lowercase ,_lowercase = g ): """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
34
"""simple docstring""" import operator def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ): """simple docstring""" UpperCamelCase = operator.lt if reverse else operator.gt UpperCamelCase = solution or [] if not arr: return solution UpperCamelCase = [arr.pop(0 )] for i, item in enumerate(_lowercase ): if _operator(_lowercase ,sublist[-1] ): sublist.append(_lowercase ) arr.pop(_lowercase ) # merging sublist into solution list if not solution: solution.extend(_lowercase ) else: while sublist: UpperCamelCase = sublist.pop(0 ) for i, xx in enumerate(_lowercase ): if not _operator(_lowercase ,_lowercase ): solution.insert(_lowercase ,_lowercase ) break else: solution.append(_lowercase ) strand_sort(_lowercase ,_lowercase ,_lowercase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
34
1
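A worked check of the Archimedes formula in the snippet above, F = rho * g * V, with illustrative values for water:

rho = 1000.0    # fluid density of water, kg/m^3
g = 9.80665     # standard gravity, m/s^2 (same constant as above)
volume = 0.002  # displaced volume, m^3

buoyant_force = rho * g * volume
assert abs(buoyant_force - 19.6133) < 1e-4  # newtons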
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : List[str] = { 'configuration_clipseg': [ 'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig', ], 'processing_clipseg': ['CLIPSegProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Any = [ 'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel', 'CLIPSegForImageSegmentation', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
484
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ : int = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = [ 'VAN_PRETRAINED_MODEL_ARCHIVE_LIST', 'VanForImageClassification', 'VanModel', 'VanPreTrainedModel', ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
484
1
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [get_openlibrary_data(author["key"])["name"] for author in data["Authors"]]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
12
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__) class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ): __lowerCAmelCase : bool = None __lowerCAmelCase : bool = None class _snake_case ( folder_based_builder.FolderBasedBuilder ): __lowerCAmelCase : Optional[Any] = datasets.Audio() __lowerCAmelCase : Union[str, Any] = 'audio' __lowerCAmelCase : str = AudioFolderConfig __lowerCAmelCase : List[str] # definition at the bottom of the script __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' ) lowerCamelCase__ : int = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] lowerCamelCase__ : int = AUDIO_EXTENSIONS
12
1
from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory 
import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
83
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _SCREAMING_SNAKE_CASE = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } _SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512} def snake_case ( snake_case__ :Tuple) -> str: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char)) _A = char _A = set(snake_case__) return pairs class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[Any] = VOCAB_FILES_NAMES lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :int = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int: super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: _A = json.load(lowerCAmelCase_ ) _A = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: _A = merges_handle.read().split("""\n""" )[1:-1] _A = [tuple(merge.split() ) for merge in merges] _A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _A = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: if token in self.cache: return self.cache[token] _A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ ) _A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ ) _A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ ) if "\n" in token: _A = token.replace("""\n""" , """ __newln__""" ) _A = token.split(""" """ ) _A = [] for token in tokens: if not len(lowerCAmelCase_ ): continue _A = token.lower() _A = tuple(lowerCAmelCase_ ) _A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) _A = get_pairs(lowerCAmelCase_ ) if not pairs: words.append(lowerCAmelCase_ ) continue while True: _A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(lowerCAmelCase_ ): try: _A = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) new_word.extend(word[i:j] ) _A = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(lowerCAmelCase_ ) _A = new_word if len(lowerCAmelCase_ ) == 1: break else: _A = get_pairs(lowerCAmelCase_ ) _A = """@@ 
""".join(lowerCAmelCase_ ) _A = word[:-4] _A = word words.append(lowerCAmelCase_ ) return " ".join(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [] _A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: _A = token.lower() return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" ) _A = 0 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _A = token_index writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file
83
1
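A minimal standalone sketch of the get_pairs helper the BPE tokenizer above builds on: it collects the set of adjacent symbol pairs in a word, which the merge loop then ranks.

def get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

# "low" with the end-of-word marker convention used by BPE tokenizers:
assert get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}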
import argparse import hashlib # hashlib is only used inside the Test class import struct class __snake_case : def __init__( self ,a_ ): """simple docstring""" lowerCAmelCase__ = data lowerCAmelCase__ = [0X67452301, 0XEFCDAB89, 0X98BADCFE, 0X10325476, 0XC3D2E1F0] @staticmethod def SCREAMING_SNAKE_CASE_ ( a_ ,a_ ): """simple docstring""" return ((n << b) | (n >> (32 - b))) & 0XFFFFFFFF def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64) lowerCAmelCase__ = self.data + padding + struct.pack('>Q' ,8 * len(self.data ) ) return padded_data def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" return [ self.padded_data[i : i + 64] for i in range(0 ,len(self.padded_data ) ,64 ) ] def SCREAMING_SNAKE_CASE_ ( self ,a_ ): """simple docstring""" lowerCAmelCase__ = list(struct.unpack('>16L' ,snake_case_ ) ) + [0] * 64 for i in range(16 ,80 ): lowerCAmelCase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) ,1 ) return w def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = self.padding() lowerCAmelCase__ = self.split_blocks() for block in self.blocks: lowerCAmelCase__ = self.expand_block(snake_case_ ) lowerCAmelCase__ = self.h for i in range(0 ,80 ): if 0 <= i < 20: lowerCAmelCase__ = (b & c) | ((~b) & d) lowerCAmelCase__ = 0X5A827999 elif 20 <= i < 40: lowerCAmelCase__ = b ^ c ^ d lowerCAmelCase__ = 0X6ED9EBA1 elif 40 <= i < 60: lowerCAmelCase__ = (b & c) | (b & d) | (c & d) lowerCAmelCase__ = 0X8F1BBCDC elif 60 <= i < 80: lowerCAmelCase__ = b ^ c ^ d lowerCAmelCase__ = 0XCA62C1D6 lowerCAmelCase__ = ( self.rotate(snake_case_ ,5 ) + f + e + k + expanded_block[i] & 0XFFFFFFFF, a, self.rotate(snake_case_ ,30 ), c, d, ) lowerCAmelCase__ = ( self.h[0] + a & 0XFFFFFFFF, self.h[1] + b & 0XFFFFFFFF, self.h[2] + c & 0XFFFFFFFF, self.h[3] + d & 0XFFFFFFFF, self.h[4] + e & 0XFFFFFFFF, ) return ("{:08x}" * 5).format(*self.h ) def UpperCAmelCase_ ( ) -> Any: """simple docstring""" lowerCAmelCase__ = B'''Test String''' assert SHAaHash(lowerCamelCase_ ).final_hash() == hashlib.shaa(lowerCamelCase_ ).hexdigest() # noqa: S324 def UpperCAmelCase_ ( ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: lowerCAmelCase__ = f.read() else: lowerCAmelCase__ = bytes(lowerCamelCase_ , 'utf-8' ) print(SHAaHash(lowerCamelCase_ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
193
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class _SCREAMING_SNAKE_CASE ( snake_case ): lowerCamelCase_ = 'markuplm' def __init__( self : List[Any] , snake_case_ : List[str]=3_0522 , snake_case_ : str=768 , snake_case_ : str=12 , snake_case_ : Optional[Any]=12 , snake_case_ : Any=3072 , snake_case_ : Dict="gelu" , snake_case_ : Dict=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : int=512 , snake_case_ : Optional[Any]=2 , snake_case_ : int=0.02 , snake_case_ : Optional[Any]=1E-12 , snake_case_ : Dict=0 , snake_case_ : Optional[int]=0 , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=256 , snake_case_ : Union[str, Any]=1024 , snake_case_ : Optional[Any]=216 , snake_case_ : Optional[Any]=1001 , snake_case_ : Tuple=32 , snake_case_ : str=50 , snake_case_ : int="absolute" , snake_case_ : List[Any]=True , snake_case_ : List[Any]=None , **snake_case_ : Optional[Any] , ): """simple docstring""" super().__init__( pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , ) A : int = vocab_size A : Dict = hidden_size A : str = num_hidden_layers A : List[Any] = num_attention_heads A : int = hidden_act A : List[Any] = intermediate_size A : Optional[Any] = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : str = max_position_embeddings A : Dict = type_vocab_size A : Optional[int] = initializer_range A : Optional[Any] = layer_norm_eps A : Any = position_embedding_type A : List[Any] = use_cache A : List[str] = classifier_dropout # additional properties A : Optional[Any] = max_depth A : Tuple = max_xpath_tag_unit_embeddings A : str = max_xpath_subs_unit_embeddings A : Dict = tag_pad_id A : Dict = subs_pad_id A : List[str] = xpath_unit_hidden_size
256
0
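A quick standalone check of the 32-bit left-rotate primitive the SHA-1 class above is built on (restated here so it runs on its own):

def rotate(n: int, b: int) -> int:
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

assert rotate(0x80000000, 1) == 0x00000001  # the high bit wraps around
assert rotate(0x00000001, 4) == 0x00000010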
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
710
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
484
0
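A worked check of the arc-length formula above: a 90-degree arc is a quarter of the full circumference 2*pi*r.

from math import pi

radius, angle = 10, 90  # same values as the demo call above
arc = 2 * pi * radius * (angle / 360)
assert abs(arc - 5 * pi) < 1e-12  # a quarter of the circumference 20*pi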
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer a__ : str =logging.get_logger(__name__) a__ : Any ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a__ : Dict ={ """vocab_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt""" ), """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""", """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli""": ( """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json""" ), }, } a__ : int ={ """squeezebert/squeezebert-uncased""": 512, """squeezebert/squeezebert-mnli""": 512, """squeezebert/squeezebert-mnli-headless""": 512, } a__ : List[str] ={ """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True}, } class snake_case ( a_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Tuple =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Optional[int] =PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : List[Any] =SqueezeBertTokenizer def __init__( self : List[str] , __A : Optional[int]=None , __A : Optional[int]=None , __A : Optional[Any]=True , __A : List[str]="[UNK]" , __A : Optional[Any]="[SEP]" , __A : Any="[PAD]" , __A : Tuple="[CLS]" , __A : Dict="[MASK]" , __A : Any=True , __A : List[Any]=None , **__A : str , ): super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): __UpperCamelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) __UpperCamelCase = do_lower_case __UpperCamelCase = strip_accents __UpperCamelCase = tokenize_chinese_chars __UpperCamelCase = normalizer_class(**_snake_case ) __UpperCamelCase = do_lower_case def _lowerCamelCase ( self : str , __A : List[str] , __A : Optional[Any]=None ): __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self : Any , __A : List[str] , __A : str = None ): __UpperCamelCase = [self.sep_token_id] 
__UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self : Tuple , __A : List[Any] , __A : Optional[Any] = None ): __UpperCamelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
399
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]: _UpperCamelCase : Union[str, Any] = checkpoint _UpperCamelCase : int = {} _UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight'''] _UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias'''] _UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight'''] _UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias'''] _UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight'''] _UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias'''] _UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight'''] _UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias'''] _UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight'''] _UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias'''] _UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight'''] _UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias'''] _UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight'''] _UpperCamelCase : int = vae_state_dict['''quant_conv.bias'''] _UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight'''] _UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias'''] # Retrieves the keys for the encoder down blocks only _UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} ) _UpperCamelCase : Tuple = { layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase ) } # Retrieves the keys for the decoder up blocks only _UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} ) _UpperCamelCase : int = { layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase ) } for i in range(UpperCamelCase ): _UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key] if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: _UpperCamelCase : Optional[int] = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.weight''' ) _UpperCamelCase : Dict = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.bias''' ) _UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase ) _UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''} assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase ) _UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key] _UpperCamelCase : Tuple = 2 for i in range(1 ,num_mid_res_blocks + 1 ): _UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key] _UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase ) _UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase 
,additional_replacements=[meta_path] ,config=UpperCamelCase ) _UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key] _UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase ) _UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase ) conv_attn_to_linear(UpperCamelCase ) for i in range(UpperCamelCase ): _UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i _UpperCamelCase : Optional[int] = [ key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key ] if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: _UpperCamelCase : Tuple = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.weight''' ] _UpperCamelCase : Any = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.bias''' ] _UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase ) _UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''} assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase ) _UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key] _UpperCamelCase : Optional[Any] = 2 for i in range(1 ,num_mid_res_blocks + 1 ): _UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key] _UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase ) _UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase ) _UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key] _UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase ) _UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase ) conv_attn_to_linear(UpperCamelCase ) return new_checkpoint def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]: # Only support V1 _UpperCamelCase : Tuple = requests.get( ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' ) _UpperCamelCase : List[Any] = io.BytesIO(r.content ) _UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase ) _UpperCamelCase : str = 5_12 _UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' if checkpoint_path.endswith('''safetensors''' ): from safetensors import safe_open _UpperCamelCase : str = {} with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f: for key in f.keys(): _UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase ) else: _UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict'''] # Convert the VAE model. 
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase ) _UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase ) _UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase ) vae.load_state_dict(UpperCamelCase ) vae.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") _UpperCAmelCase : int = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
683
0
"""simple docstring""" from numpy import exp, pi, sqrt def lowerCamelCase ( _snake_case ,_snake_case = 0.0 ,_snake_case = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
254
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase__ = logging.get_logger(__name__) @dataclass class a ( lowercase ): UpperCamelCase : Dict = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **UpperCamelCase_ ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: UpperCAmelCase__ : int = deprecated_arg[3:] UpperCAmelCase__ : Tuple = not kwargs.pop(UpperCamelCase_ ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) UpperCAmelCase__ : Dict = kwargs.pop('tpu_name' , self.tpu_name ) UpperCAmelCase__ : Tuple = kwargs.pop('device_idx' , self.device_idx ) UpperCAmelCase__ : List[str] = kwargs.pop('eager_mode' , self.eager_mode ) UpperCAmelCase__ : Any = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**UpperCamelCase_ ) UpperCamelCase : str = field( default=lowercase , metadata={"""help""": """Name of TPU"""} , ) UpperCamelCase : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) UpperCamelCase : bool = field(default=lowercase , metadata={"""help""": """Benchmark models in eager model."""} ) UpperCamelCase : bool = field( default=lowercase , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __snake_case ( self ): requires_backends(self , ['tf'] ) UpperCAmelCase__ : Optional[Any] = None if self.tpu: try: if self.tpu_name: UpperCAmelCase__ : str = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: UpperCAmelCase__ : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: UpperCAmelCase__ : Tuple = None return tpu @cached_property def __snake_case ( self ): requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) UpperCAmelCase__ : Union[str, Any] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) UpperCAmelCase__ : Any = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU UpperCAmelCase__ : Any = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def __snake_case ( self ): requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def __snake_case ( self ): requires_backends(self , ['tf'] ) return self._setup_strategy @property def __snake_case ( self ): requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def __snake_case ( self ): requires_backends(self , ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __snake_case ( self ): return self.n_gpu > 0
254
1
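A worked check of the Gaussian density above: at x = mu the density peaks at 1/sqrt(2*pi*sigma^2), about 0.3989 for the standard normal (the function is restated so the check runs standalone).

from math import exp, pi, sqrt

def gaussian(x, mu=0.0, sigma=1.0):  # same formula as the snippet above
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))

assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12  # peak of the standard normal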
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
405
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
162
0
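A worked example of the NPV helper above, with an illustrative project: pay 1000 now, receive 500 and 600 over the next two periods, discounted at 10%.

cash_flows = [-1000.0, 500.0, 600.0]
discount_rate = 0.10

npv = round(sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows)), 2)
assert npv == -49.59  # -1000 + 500/1.1 + 600/1.21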
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->str: A__ : Tuple = {} A__ : Union[str, Any] = tokenizer(example["""content"""], truncation=UpperCAmelCase__ )["""input_ids"""] A__ : Any = len(example["""content"""] ) / len(output["""input_ids"""] ) return output A_ = HfArgumentParser(PretokenizationArguments) A_ = parser.parse_args() if args.num_workers is None: A_ = multiprocessing.cpu_count() A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) A_ = time.time() A_ = load_dataset(args.dataset_name, split='''train''') print(F'Dataset loaded in {time.time()-t_start:.2f}s') A_ = time.time() A_ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(F'Dataset tokenized in {time.time()-t_start:.2f}s') A_ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
498
"""simple docstring""" def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : List[Any] ) ->List[Any]: A__ : Union[str, Any] = [1] for i in range(2, UpperCAmelCase__ ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" A__ : Optional[int] = [] A__ : List[str] = list(range(UpperCAmelCase__ ) ) # Find permutation while factorials: A__ : Optional[int] = factorials.pop() A__ , A__ : Optional[int] = divmod(UpperCAmelCase__, UpperCAmelCase__ ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
498
1
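A quick check of the factorial-number-system decoding above (restated compactly so it runs standalone): k = 0 yields the first lexicographic permutation of range(n) and k = n! - 1 the last.

def kth_permutation(k, n):  # compact restatement of the snippet above
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation, elements = [], list(range(n))
    while factorials:
        number, k = divmod(k, factorials.pop())
        permutation.append(elements.pop(number))
    permutation.append(elements[0])
    return permutation

assert kth_permutation(0, 4) == [0, 1, 2, 3]
assert kth_permutation(23, 4) == [3, 2, 1, 0]  # 23 = 4! - 1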
"""simple docstring""" import os from collections.abc import Iterator def __magic_name__ ( lowercase = "." ): for dir_path, dir_names, filenames in os.walk(lowercase ): SCREAMING_SNAKE_CASE_: Any =[d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(lowercase )[1] in (".py", ".ipynb"): yield os.path.join(lowercase , lowercase ).lstrip("""./""" ) def __magic_name__ ( lowercase ): return f'''{i * " "}*''' if i else "\n##" def __magic_name__ ( lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Dict =old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(lowercase ) or old_parts[i] != new_part) and new_part: print(f'''{md_prefix(lowercase )} {new_part.replace("_" , " " ).title()}''' ) return new_path def __magic_name__ ( lowercase = "." ): SCREAMING_SNAKE_CASE_: Any ="""""" for filepath in sorted(good_file_paths(lowercase ) ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =os.path.split(lowercase ) if filepath != old_path: SCREAMING_SNAKE_CASE_: List[str] =print_path(lowercase , lowercase ) SCREAMING_SNAKE_CASE_: Tuple =(filepath.count(os.sep ) + 1) if filepath else 0 SCREAMING_SNAKE_CASE_: Union[str, Any] =f'''{filepath}/{filename}'''.replace(""" """ , """%20""" ) SCREAMING_SNAKE_CASE_: Optional[Any] =os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0] print(f'''{md_prefix(lowercase )} [{filename}]({url})''' ) if __name__ == "__main__": print_directory_md(""".""")
409
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a ( UpperCAmelCase__ ): def __init__( self : Any , lowerCAmelCase : NestedDataStructureLike[PathLike] , lowerCAmelCase : Optional[NamedSplit] = None , lowerCAmelCase : Optional[Features] = None , lowerCAmelCase : str = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Union[str, Any] , ) -> str: '''simple docstring''' super().__init__( lowerCAmelCase , split=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , num_proc=lowerCAmelCase , **lowerCAmelCase , ) SCREAMING_SNAKE_CASE_: Union[str, Any] =path_or_paths if isinstance(lowerCAmelCase , lowerCAmelCase ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE_: str =Text( cache_dir=lowerCAmelCase , data_files=lowerCAmelCase , features=lowerCAmelCase , **lowerCAmelCase , ) def lowerCamelCase__ ( self : Any ) -> Optional[int]: '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE_: Union[str, Any] =self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE_: List[Any] =None SCREAMING_SNAKE_CASE_: Tuple =None SCREAMING_SNAKE_CASE_: List[Any] =None SCREAMING_SNAKE_CASE_: Dict =None self.builder.download_and_prepare( download_config=lowerCAmelCase , download_mode=lowerCAmelCase , verification_mode=lowerCAmelCase , base_path=lowerCAmelCase , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE_: List[str] =self.builder.as_dataset( split=self.split , verification_mode=lowerCAmelCase , in_memory=self.keep_in_memory ) return dataset
409
1
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } SCREAMING_SNAKE_CASE__ : str = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any: for attribute in key.split('.' ): a : Optional[int] = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) if weight_type is not None: a : str = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape else: a : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": a : List[str] = value elif weight_type == "weight_g": a : int = value elif weight_type == "weight_v": a : Union[str, Any] = value elif weight_type == "bias": a : Dict = value else: a : Optional[Any] = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def A_ ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: a : Dict = [] a : Optional[Any] = fairseq_model.state_dict() a : Optional[Any] = hf_model.feature_extractor a : int = hf_model.adapter for name, value in fairseq_dict.items(): a : List[Any] = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , ) a : str = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) a : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: a : Optional[Any] = True if "*" in mapped_key: a : str = name.split(UpperCAmelCase__ )[0].split('.' 
)[-2] a : Optional[int] = mapped_key.replace('*' , UpperCAmelCase__ ) if "weight_g" in name: a : List[str] = 'weight_g' elif "weight_v" in name: a : Union[str, Any] = 'weight_v' elif "bias" in name: a : int = 'bias' elif "weight" in name: a : str = 'weight' else: a : Any = None set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) continue if not is_used: unused_weights.append(UpperCAmelCase__ ) logger.warning(F'Unused weights: {unused_weights}' ) def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple: a : List[str] = full_name.split('conv_layers.' )[-1] a : str = name.split('.' ) a : Any = int(items[0] ) a : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) a : Dict = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) a : int = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) a : int = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) a : Dict = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(UpperCAmelCase__ ) def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any: a : Union[str, Any] = full_name.split('adaptor.' )[-1] a : Union[str, Any] = name.split('.' ) if items[1].isdigit(): a : Tuple = int(items[1] ) else: a : Optional[Any] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.' a : Optional[int] = value logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.' a : Optional[Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.' a : Tuple = value logger.info(F'Adapter proj layer bias was initialized from {full_name}.' 
) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.' a : Optional[Any] = value logger.info(F'Adapter proj layer weight was initialized from {full_name}.' ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.' a : Optional[Any] = value logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.' a : Any = value logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' ) else: unused_weights.append(UpperCAmelCase__ ) def A_ ( UpperCAmelCase__ ) -> int: a , a : Tuple = emb.weight.shape a : List[str] = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ ) a : List[Any] = emb.weight.data return lin_layer @torch.no_grad() def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> Optional[int]: a : Optional[int] = WavaVecaConfig.from_pretrained( UpperCAmelCase__ , add_adapter=UpperCAmelCase__ , adapter_stride=UpperCAmelCase__ , adapter_kernel_size=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , output_hidden_size=UpperCAmelCase__ , ) a : Union[str, Any] = MBartConfig.from_pretrained(UpperCAmelCase__ ) # load model a , a , a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) a : Any = model[0].eval() # load feature extractor a : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ ) # set weights for wav2vec2 encoder a : Dict = WavaVecaModel(UpperCAmelCase__ ) recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase__ ) # load decoder weights a : List[str] = MBartForCausalLM(UpperCAmelCase__ ) a , a : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase__ ) logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) a : Any = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ ) a : Tuple = False a : Optional[int] = MBartaaTokenizer(UpperCAmelCase__ ) tokenizer.save_pretrained(UpperCAmelCase__ ) a : Optional[int] = hf_wavavec.config.to_dict() a : Union[str, Any] = tokenizer.pad_token_id a : Any = tokenizer.bos_token_id a : Any = tokenizer.eos_token_id a : str = 'mbart50' a : str = 'wav2vec2' a : List[str] = tokenizer.eos_token_id a : int = 25_0004 a : Union[str, Any] = tokenizer.eos_token_id a : Any = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ ) hf_wavavec.save_pretrained(UpperCAmelCase__ ) feature_extractor.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() 
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config") SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
509
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A_ ( _UpperCAmelCase ): """simple docstring""" lowercase : Tuple = ["image_processor", "tokenizer"] lowercase : str = "AutoImageProcessor" lowercase : List[Any] = "AutoTokenizer" def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: super().__init__(__UpperCAmelCase , __UpperCAmelCase ) a : Dict = self.image_processor def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a : int = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: a : Optional[Any] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: a : int = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def lowercase_ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def lowercase_ ( self ) -> Any: return ["input_ids", "attention_mask", "pixel_values"]
509
1
'''simple docstring''' import argparse import copy def lowerCamelCase( SCREAMING_SNAKE_CASE_ ) -> Optional[int]: A_ = {} with open(__lowerCamelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: A_ = [] _list.append([line.split()[1], line.split()[2]] ) A_ = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: A_ = [] _list.append([line.split()[0], line.split()[2]] ) A_ = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str: with open(__lowerCamelCase ) as f: A_ = f.read(1 ) A_ = start_node A_ = [] A_ = start_node A_ = 0 while visiting not in first_solution: A_ = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__lowerCamelCase ) and k[0] not in first_solution: A_ = k[1] A_ = k[0] first_solution.append(__lowerCamelCase ) A_ = distance_of_first_solution + int(__lowerCamelCase ) A_ = best_node first_solution.append(__lowerCamelCase ) A_ = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 A_ = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: A_ = [] for n in solution[1:-1]: A_ = solution.index(__lowerCamelCase ) for kn in solution[1:-1]: A_ = solution.index(__lowerCamelCase ) if n == kn: continue A_ = copy.deepcopy(__lowerCamelCase ) A_ = kn A_ = n A_ = 0 for k in _tmp[:-1]: A_ = _tmp[_tmp.index(__lowerCamelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: A_ = distance + int(i[1] ) _tmp.append(__lowerCamelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) A_ = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int: A_ = 1 A_ = first_solution A_ = [] A_ = distance_of_first_solution A_ = solution while count <= iters: A_ = find_neighborhood(__lowerCamelCase ,__lowerCamelCase ) A_ = 0 A_ = neighborhood[index_of_best_solution] A_ = len(__lowerCamelCase ) - 1 A_ = False while not found: A_ = 0 while i < len(__lowerCamelCase ): if best_solution[i] != solution[i]: A_ = best_solution[i] A_ = solution[i] break A_ = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) A_ = True A_ = best_solution[:-1] A_ = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: A_ = cost A_ = solution else: A_ = index_of_best_solution + 1 A_ = neighborhood[index_of_best_solution] if len(__lowerCamelCase ) >= size: tabu_list.pop(0 ) A_ = count + 1 return best_solution_ever, best_cost def lowerCamelCase( SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]: A_ = generate_neighbours(args.File ) A_ , A_ = generate_first_solution( args.File ,__lowerCamelCase ) A_ , A_ = tabu_search( __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,args.Iterations ,args.Size ,) print(F'Best solution: {best_sol}, with total distance: {best_cost}.' 
)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument("-s", "--Size", type=int, help="Size of the tabu list", required=True)

    # Pass the arguments to main method
    main(parser.parse_args())
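# A hypothetical invocation of the tabu-search script above (file names are
# placeholders); each line of the data file must hold "<node_a> <node_b> <distance>":
#
#   python tabu_search.py -f tabu_test_data.txt -i 100 -s 5
#
# where tabu_test_data.txt looks like:
#   a b 20
#   a c 18
#   b c 10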
366
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A ( __lowerCamelCase ) -> List[str]: return 1.0 / (1.0 + np.exp(-_outputs )) def __A ( __lowerCamelCase ) -> List[str]: a = np.max(_outputs , axis=-1 , keepdims=__lowerCamelCase ) a = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase ) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''sigmoid''' UpperCamelCase__ = '''softmax''' UpperCamelCase__ = '''none''' @add_end_docstrings( __magic_name__ , r''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = False UpperCamelCase__ = ClassificationFunction.NONE def __init__( self :List[str] , **__magic_name__ :List[Any] ): '''simple docstring''' super().__init__(**__magic_name__ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def lowerCamelCase__ ( self :Any , __magic_name__ :int=None , __magic_name__ :Any=None , __magic_name__ :Union[str, Any]="" , **__magic_name__ :Tuple ): '''simple docstring''' a = tokenizer_kwargs a = {} if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None: a = self.model.config.return_all_scores if isinstance(__magic_name__ , __magic_name__ ) or top_k is None: a = top_k a = False elif return_all_scores is not None: warnings.warn( """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of""" """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , __magic_name__ , ) if return_all_scores: a = None else: a = 1 if isinstance(__magic_name__ , __magic_name__ ): a = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: a = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self :Dict , *__magic_name__ :Optional[int] , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = super().__call__(*__magic_name__ , **__magic_name__ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
a = """top_k""" not in kwargs if isinstance(args[0] , __magic_name__ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[Any] , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = self.framework if isinstance(__magic_name__ , __magic_name__ ): return self.tokenizer(**__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1 and isinstance(inputs[0] , __magic_name__ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__magic_name__ , **__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a""" """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" ) return self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Tuple ): '''simple docstring''' return self.model(**__magic_name__ ) def lowerCamelCase__ ( self :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :int=None , __magic_name__ :Union[str, Any]=1 , __magic_name__ :Tuple=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: a = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: a = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None: a = self.model.config.function_to_apply else: a = ClassificationFunction.NONE a = model_outputs["""logits"""][0] a = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: a = sigmoid(__magic_name__ ) elif function_to_apply == ClassificationFunction.SOFTMAX: a = softmax(__magic_name__ ) elif function_to_apply == ClassificationFunction.NONE: a = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} a = [ {"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(__magic_name__ ) ] if not _legacy: dict_scores.sort(key=lambda __magic_name__ : x["score"] , reverse=__magic_name__ ) if top_k is not None: a = dict_scores[:top_k] return dict_scores
468
0
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() a__ : Tuple = 2 class __magic_name__ : def __init__( self , *, # begin keyword-only arguments __magic_name__="<s>" , __magic_name__="<pad>" , __magic_name__="</s>" , __magic_name__="<unk>" , __magic_name__=None , ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = bos, unk, pad, eos _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = {} _lowerCAmelCase = self.add_symbol(__magic_name__ ) _lowerCAmelCase = self.add_symbol(__magic_name__ ) _lowerCAmelCase = self.add_symbol(__magic_name__ ) _lowerCAmelCase = self.add_symbol(__magic_name__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(__magic_name__ ) _lowerCAmelCase = len(self.symbols ) def __eq__( self , __magic_name__ ): """simple docstring""" return self.indices == other.indices def __getitem__( self , __magic_name__ ): """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): """simple docstring""" return len(self.symbols ) def __contains__( self , __magic_name__ ): """simple docstring""" return sym in self.indices @classmethod def _lowerCamelCase ( cls , __magic_name__ ): """simple docstring""" _lowerCAmelCase = cls() d.add_from_file(__magic_name__ ) return d def _lowerCamelCase ( self , __magic_name__ , __magic_name__=1 , __magic_name__=False ): """simple docstring""" if word in self.indices and not overwrite: _lowerCAmelCase = self.indices[word] _lowerCAmelCase = self.count[idx] + n return idx else: _lowerCAmelCase = len(self.symbols ) _lowerCAmelCase = idx self.symbols.append(__magic_name__ ) self.count.append(__magic_name__ ) return idx def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" return 0 def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): try: with open(__magic_name__ , 'r' , encoding='utf-8' ) as fd: self.add_from_file(__magic_name__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(__magic_name__ ) ) return _lowerCAmelCase = f.readlines() _lowerCAmelCase = self._load_meta(__magic_name__ ) for line in lines[indices_start_line:]: try: _lowerCAmelCase , _lowerCAmelCase = line.rstrip().rsplit(' ' , 1 ) if field == "#fairseq:overwrite": _lowerCAmelCase = True _lowerCAmelCase , _lowerCAmelCase = line.rsplit(' ' , 1 ) else: _lowerCAmelCase = False _lowerCAmelCase = int(__magic_name__ ) _lowerCAmelCase = line if word in self and not overwrite: raise RuntimeError( 'Duplicate word found when loading Dictionary: \'{}\'. ' 'Duplicate words can overwrite earlier ones by adding the ' '#fairseq:overwrite flag at the end of the corresponding row ' 'in the dictionary file. 
If using the Camembert model, please ' 'download an updated copy of the model file.'.format(__magic_name__ ) ) self.add_symbol(__magic_name__ , n=__magic_name__ , overwrite=__magic_name__ ) except ValueError: raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' ) def A__ ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = dict((re.sub(R'@@$', '', __lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$', '</w>', __lowerCamelCase ), v) for k, v in d.items() ) _lowerCAmelCase = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] _lowerCAmelCase = d[k] # restore return da def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" # prep if not os.path.exists(__lowerCamelCase ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models _lowerCAmelCase = os.path.join(__lowerCamelCase, 'checkpoint.pt' ) if not os.path.isfile(__lowerCamelCase ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) _lowerCAmelCase = torch.load(__lowerCamelCase, map_location='cpu' ) _lowerCAmelCase = chkpt['cfg']['model'] # dicts _lowerCAmelCase = os.path.join(__lowerCamelCase, 'dict.txt' ) if not os.path.isfile(__lowerCamelCase ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) _lowerCAmelCase = Dictionary.load(__lowerCamelCase ) _lowerCAmelCase = rewrite_dict_keys(src_dict.indices ) _lowerCAmelCase = len(__lowerCamelCase ) _lowerCAmelCase = os.path.join(__lowerCamelCase, VOCAB_FILES_NAMES['vocab_file'] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(__lowerCamelCase, 'w', encoding='utf-8' ) as f: f.write(json.dumps(__lowerCamelCase, ensure_ascii=__lowerCamelCase, indent=__lowerCamelCase ) ) # merges_file (bpecodes) _lowerCAmelCase = os.path.join(__lowerCamelCase, 'bpecodes' ) if not os.path.isfile(__lowerCamelCase ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) _lowerCAmelCase = os.path.join(__lowerCamelCase, VOCAB_FILES_NAMES['merges_file'] ) shutil.copyfile(__lowerCamelCase, __lowerCamelCase ) # model config _lowerCAmelCase = os.path.join(__lowerCamelCase, 'config.json' ) _lowerCAmelCase = { 'activation_dropout': args['activation_dropout'], 'architectures': ['BioGptForCausalLM'], 'attention_probs_dropout_prob': args['attention_dropout'], 'bos_token_id': 0, 'eos_token_id': 2, 'hidden_act': args['activation_fn'], 'hidden_dropout_prob': args['dropout'], 'hidden_size': args['decoder_embed_dim'], 'initializer_range': 0.02, 'intermediate_size': args['decoder_ffn_embed_dim'], 'layer_norm_eps': 1e-12, 'layerdrop': args['decoder_layerdrop'], 'max_position_embeddings': args['max_target_positions'], 'model_type': 'biogpt', 'num_attention_heads': args['decoder_attention_heads'], 'num_hidden_layers': args['decoder_layers'], 'pad_token_id': 1, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_decoder_input_output_embed'], 'vocab_size': src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(__lowerCamelCase, 'w', encoding='utf-8' ) as f: f.write(json.dumps(__lowerCamelCase, ensure_ascii=__lowerCamelCase, indent=__lowerCamelCase ) ) # tokenizer config _lowerCAmelCase = os.path.join(__lowerCamelCase, __lowerCamelCase ) _lowerCAmelCase = { 'bos_token': 
'<s>', 'eos_token': '</s>', 'model_max_length': 1_0_2_4, 'pad_token': '<pad>', 'special_tokens_map_file': None, 'tokenizer_class': 'BioGptTokenizer', 'unk_token': '<unk>', } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(__lowerCamelCase, 'w', encoding='utf-8' ) as f: f.write(json.dumps(__lowerCamelCase, ensure_ascii=__lowerCamelCase, indent=__lowerCamelCase ) ) # model _lowerCAmelCase = chkpt['model'] # remove unneeded keys _lowerCAmelCase = [ 'decoder.version', ] for k in ignore_keys: model_state_dict.pop(__lowerCamelCase, __lowerCamelCase ) _lowerCAmelCase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('output_projection.weight' ): _lowerCAmelCase = model_state_dict.pop(__lowerCamelCase ) else: _lowerCAmelCase = model_state_dict.pop(__lowerCamelCase ) _lowerCAmelCase = BioGptConfig.from_pretrained(__lowerCamelCase ) _lowerCAmelCase = BioGptForCausalLM(__lowerCamelCase ) # check that it loads ok model_new.load_state_dict(__lowerCamelCase ) # save _lowerCAmelCase = os.path.join(__lowerCamelCase, __lowerCamelCase ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(__lowerCamelCase, __lowerCamelCase ) print('Conversion is done!' ) if __name__ == "__main__": a__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a__ : str = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
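# A hypothetical invocation of the BioGPT conversion above (the script file
# name and both paths are placeholders); the dump dir must contain
# checkpoint.pt, dict.txt and bpecodes, as the script checks:
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path /path/to/output_dir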
712
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __magic_name__ ( unittest.TestCase ): def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): _lowerCAmelCase = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sgugger/tiny-distilbert-classification' _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , torchscript=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , fpaa=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ ) # set architectures equal to `None` _lowerCAmelCase = None _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): """simple 
docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__magic_name__ , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ ) _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tinier_bart' _lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ ) _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' _lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ ) _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tinier_bart' _lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ ) _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = 
PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__magic_name__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__magic_name__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__magic_name__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(__magic_name__ , 'env.csv' ) , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , 'env.csv' ) ).exists() ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , 'sequential' ) ) self.assertTrue(hasattr(__magic_name__ , 'cumulative' ) ) self.assertTrue(hasattr(__magic_name__ , 'current' ) ) self.assertTrue(hasattr(__magic_name__ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , 'log.txt' ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , multi_process=__magic_name__ , ) _lowerCAmelCase = PyTorchBenchmark(__magic_name__ ) _lowerCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , 'log.txt' ) ).exists() )
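# Outside the test harness, the benchmark utilities exercised above can be
# driven directly; a minimal sketch mirroring the test arguments:
if __name__ == "__main__":
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    benchmark_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = PyTorchBenchmark(benchmark_args).run()
    # Per model: {"bs": [...], "ss": [...], "result": {batch_size: {seq_len: seconds}}}
    print(results.time_inference_result)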
309
0
'''simple docstring''' import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase="gelu" , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase=True , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_multiple_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = weight_tying lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = self.get_config() return config, input_ids, input_mask, token_labels def snake_case ( self ): """simple docstring""" return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ = True return config, input_ids, input_mask, token_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = GPTNeoXJapaneseModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = True lowerCamelCase_ = GPTNeoXJapaneseModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = True lowerCamelCase_ = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # first forward pass lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , use_cache=UpperCamelCase ) lowerCamelCase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase ) lowerCamelCase_ = output_from_no_past["hidden_states"][0] lowerCamelCase_ = model( UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )["hidden_states"][0] # select random slice lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _lowerCamelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _lowerCamelCase = ( {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTNeoXJapaneseModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" 
self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" # This regression test was failing with PyTorch < 1.3 lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() lowerCamelCase_ = None self.model_tester.create_and_check_model_as_decoder(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "abeja/gpt-neox-japanese-2.7b" lowerCamelCase_ = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] lowerCamelCase_ = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] lowerCamelCase_ = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase ) lowerCamelCase_ = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase ) lowerCamelCase_ = [] for prompt in prompts: lowerCamelCase_ = tokenizer(UpperCamelCase , return_tensors="pt" ).input_ids lowerCamelCase_ = model.generate(UpperCamelCase , max_length=50 ) lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) predicted_outputs += generated_string self.assertListEqual(UpperCamelCase , UpperCamelCase )
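# Condensed from the slow integration test above; it downloads the ~2.7B
# Japanese checkpoint, so treat it as illustrative rather than a unit test.
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")

    input_ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, max_length=50)
    print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])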
675
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = GPTSwaTokenizer _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = False def snake_case ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "This is a test" lowerCamelCase_ = "This is a test" return input_text, output_text def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "<s>" lowerCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(UpperCamelCase ) , 2000 ) def snake_case ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase ) lowerCamelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] ) lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) # fmt: off self.assertListEqual( UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase ) lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."] lowerCamelCase_ = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ): self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
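# Condensed from the tests above: the fast encode/decode round-trip.
# SAMPLE_VOCAB stands for the SentencePiece fixture path that the dump binds
# to an obfuscated name at the top of this file.
if __name__ == "__main__":
    tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
    ids = tokenizer.encode_fast("This is a test")
    print(ids)                         # [465, 287, 265, 631, 842], per the expectations above
    print(tokenizer.decode_fast(ids))  # "This is a test"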
675
1
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase__ ( UpperCAmelCase_=32 , UpperCAmelCase_=10 , UpperCAmelCase_=1_00 , UpperCAmelCase_=10_26 , UpperCAmelCase_=True , UpperCAmelCase_="data/tokenized_stories_train_wikitext103.jbl" , UpperCAmelCase_="igf_context_pairs.jbl" , )-> List[str]: """simple docstring""" set_seed(3 ) # generate train_data and objective_set UpperCamelCase , UpperCamelCase = generate_datasets( UpperCAmelCase_ , UpperCAmelCase_ , number=UpperCAmelCase_ , min_len=10_26 , trim=UpperCAmelCase_ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCamelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model UpperCamelCase = load_gpta("gpt2" ).to(UpperCAmelCase_ ) print("computing perplexity on objective set" ) UpperCamelCase = compute_perplexity(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).item() print("perplexity on objective set:" , UpperCAmelCase_ ) # collect igf pairs and save to file demo.jbl collect_objective_set(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_=15 , UpperCAmelCase_=1_28 , UpperCAmelCase_=1_00 , UpperCAmelCase_="igf_model.pt" , )-> int: """simple docstring""" set_seed(42 ) # Load pre-trained model UpperCamelCase = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model UpperCamelCase = SecondaryLearner(UpperCAmelCase_ ) # Train secondary learner UpperCamelCase = train_secondary_learner( UpperCAmelCase_ , UpperCAmelCase_ , max_epochs=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , eval_freq=1_00 , igf_model_path=UpperCAmelCase_ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=32 , UpperCAmelCase_=10_00 , UpperCAmelCase_=16 , UpperCAmelCase_=1.0 , UpperCAmelCase_=recopy_gpta , UpperCAmelCase_=None , UpperCAmelCase_=10 , UpperCAmelCase_="gpt2_finetuned.pt" , )-> Optional[int]: """simple docstring""" UpperCamelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) UpperCamelCase = RandomSampler(UpperCAmelCase_ ) UpperCamelCase = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ ) UpperCamelCase = max_steps // (len(UpperCAmelCase_ )) + 1 UpperCamelCase = 0 UpperCamelCase = torch.zeros((1, context_len) , dtype=torch.long , device=UpperCAmelCase_ ) UpperCamelCase , UpperCamelCase , UpperCamelCase = recopy_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) model.train() if secondary_learner is not None: secondary_learner.to(UpperCAmelCase_ ) secondary_learner.eval() UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = [] UpperCamelCase = [] # Compute the performance of the transformer model at the beginning UpperCamelCase = compute_perplexity(UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ ) test_perps.append(UpperCAmelCase_ ) print("Test perplexity, step" , UpperCAmelCase_ , ":" , UpperCAmelCase_ ) for epoch in range(int(UpperCAmelCase_ ) ): for step, example in enumerate(UpperCAmelCase_ ): torch.cuda.empty_cache() UpperCamelCase = random.randint(0 , example.size(2 ) - context_len - 1 ) UpperCamelCase = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() UpperCamelCase = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) UpperCamelCase = True if secondary_learner is not None: UpperCamelCase = secondary_learner.forward( torch.tensor(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(UpperCAmelCase_ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase = -1 if predicted_q < threshold: UpperCamelCase = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) UpperCamelCase = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase = compute_perplexity(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) test_perps.append(UpperCAmelCase_ ) print("Test perplexity, step" , UpperCAmelCase_ , ":" , UpperCAmelCase_ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , UpperCAmelCase_ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase__ ( )-> List[str]: """simple docstring""" UpperCamelCase = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="The output directory where the final fine-tuned model is stored." 
, ) parser.add_argument( "--tokenizer_name" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=UpperCAmelCase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=1_00 , type=UpperCAmelCase_ , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=1_00 , type=UpperCAmelCase_ , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=10_00 , type=UpperCAmelCase_ , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=1_28 , type=UpperCAmelCase_ , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=UpperCAmelCase_ , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=UpperCAmelCase_ , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=1_00 , type=UpperCAmelCase_ , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=10_26 , type=UpperCAmelCase_ , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=UpperCAmelCase_ , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=UpperCAmelCase_ , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=UpperCAmelCase_ , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=UpperCAmelCase_ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner UpperCamelCase = joblib.load("data/IGF_values.jbl" ) # Train secondary learner UpperCamelCase = training_secondary_learner( UpperCAmelCase_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model UpperCamelCase = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase , UpperCamelCase = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=UpperCAmelCase_ ) # fine-tuning of 
the gpt2 model using igf (Information Gain Filtration) finetune( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=UpperCAmelCase_ , secondary_learner=UpperCAmelCase_ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
556
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __a ( _lowerCAmelCase ): UpperCamelCase_ : Any = (EulerDiscreteScheduler,) UpperCamelCase_ : Dict = 10 def _SCREAMING_SNAKE_CASE ( self : Any , **UpperCAmelCase_ : str )-> Union[str, Any]: """simple docstring""" UpperCamelCase = { "num_train_timesteps": 1_100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**UpperCAmelCase_ ) return config def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Any: """simple docstring""" for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int )-> Any: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str )-> Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str )-> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase = sample.to(UpperCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ) UpperCamelCase = output.prev_sample UpperCamelCase = torch.sum(torch.abs(UpperCAmelCase_ ) ) UpperCamelCase = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : List[str] )-> str: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type="v_prediction" ) UpperCamelCase = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase = sample.to(UpperCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ) UpperCamelCase = output.prev_sample UpperCamelCase = torch.sum(torch.abs(UpperCAmelCase_ ) ) UpperCamelCase = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = 
scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ ) UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCamelCase = sample.to(UpperCAmelCase_ ) for t in scheduler.timesteps: UpperCamelCase = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ) UpperCamelCase = output.prev_sample UpperCamelCase = torch.sum(torch.abs(UpperCAmelCase_ ) ) UpperCamelCase = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**UpperCAmelCase_ , use_karras_sigmas=UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ ) UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCamelCase = sample.to(UpperCAmelCase_ ) for t in scheduler.timesteps: UpperCamelCase = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ) UpperCamelCase = output.prev_sample UpperCamelCase = torch.sum(torch.abs(UpperCAmelCase_ ) ) UpperCamelCase = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2 assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
556
1
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCamelCase = ["bert-base-uncased", "bert-base-cased"] UpperCamelCase = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class lowerCAmelCase_ ( tf.keras.Model ): def __init__( self , _lowerCAmelCase ): super().__init__() _lowercase : Optional[int] = tokenizer _lowercase : Dict = AutoConfig.from_pretrained(_lowerCAmelCase ) _lowercase : Any = TFAutoModel.from_config(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase ): _lowercase : int = self.tokenizer(_lowerCAmelCase ) _lowercase : int = self.bert(**_lowerCAmelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): super().setUp() _lowercase : int = [ BertTokenizer.from_pretrained(_lowerCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _lowercase : Optional[int] = [TFBertTokenizer.from_pretrained(_lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(_lowerCAmelCase , use_fast_bert_tokenizer=_lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _lowercase : Any = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] _lowercase : Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __a ( self ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): _lowercase : str = tokenizer(_lowerCAmelCase , return_tensors='tf' , padding='longest' ) _lowercase : int = tf_tokenizer(_lowerCAmelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def __a ( self ): for tf_tokenizer in self.tf_tokenizers: _lowercase : Union[str, Any] = tf_tokenizer(self.paired_sentences ) _lowercase : Any = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def __a ( self ): for tf_tokenizer in self.tf_tokenizers: _lowercase : Dict = tf.function(_lowerCAmelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): _lowercase : List[str] = tf.constant(_lowerCAmelCase ) _lowercase : Union[str, Any] = compiled_tokenizer(_lowerCAmelCase ) _lowercase : Any = tf_tokenizer(_lowerCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __a ( self ): for tf_tokenizer in 
self.tf_tokenizers: _lowercase : Dict = ModelToSave(tokenizer=_lowerCAmelCase ) _lowercase : Any = tf.convert_to_tensor(self.test_sentences ) _lowercase : List[Any] = model(_lowerCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _lowercase : int = Path(_lowerCAmelCase ) / 'saved.model' model.save(_lowerCAmelCase ) _lowercase : Any = tf.keras.models.load_model(_lowerCAmelCase ) _lowercase : List[Any] = loaded_model(_lowerCAmelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
66
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
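# A minimal usage sketch, assuming the classes restored above; the argument
# values are arbitrary examples rather than recommended settings.
if __name__ == "__main__":
    config = IBertConfig(quant_mode=True)
    onnx_config = IBertOnnxConfig(config)
    print(config.model_type)         # "ibert"
    print(dict(onnx_config.inputs))  # dynamic axes for input_ids / attention_mask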
204
0
'''simple docstring'''
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """simple docstring"""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    '''simple docstring'''
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(patience_sort(unsorted))
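# A minimal usage sketch for patience_sort above; the input list and the
# expected result in the trailing comment are arbitrary illustrative values.
if __name__ == "__main__":
    print(patience_sort([5, 3, 1, 4, 2]))  # -> [1, 2, 3, 4, 5]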
711
lowerCAmelCase__ = range(2, 2_0 + 1) lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)] lowerCAmelCase__ = {} def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple: '''simple docstring''' _UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ) _UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) ) _UpperCamelCase , _UpperCamelCase : Dict = 0, 0 _UpperCamelCase : Optional[int] = n - i _UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ ) if sub_memo is not None: _UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ ) if jumps is not None and len(UpperCAmelCase_ ) > 0: # find and make the largest jump without going over _UpperCamelCase : str = -1 for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _UpperCamelCase : Optional[Any] = _k break if max_jump >= 0: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump] # since the difference between jumps is cached, add c _UpperCamelCase : Tuple = diff + c for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ): _UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) if new_c > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: _UpperCamelCase : Union[str, Any] = [] else: _UpperCamelCase : List[Any] = {c: []} _UpperCamelCase : Optional[int] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped _UpperCamelCase : List[str] = sub_memo[c] # keep jumps sorted by # of terms skipped _UpperCamelCase : Union[str, Any] = 0 while j < len(UpperCAmelCase_ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) ) return (diff, dn) def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' if i >= n: return 0, i if k > len(UpperCAmelCase_ ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _UpperCamelCase : Any = i _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0 for j in range(len(UpperCAmelCase_ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _UpperCamelCase : Union[str, Any] = ds_c + ds_b diff += addend _UpperCamelCase : Union[str, Any] = 0 for j in range(UpperCAmelCase_ ): _UpperCamelCase : Union[str, Any] = a_i[j] + addend _UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return diff, i - start_i def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ): _UpperCamelCase : List[str] = digits[j] + 
addend if s >= 1_0: _UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) _UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient else: _UpperCamelCase : Dict = s _UpperCamelCase : Optional[Any] = addend // 1_0 if addend == 0: break while addend > 0: _UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) digits.append(UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int: '''simple docstring''' _UpperCamelCase : Optional[Any] = [1] _UpperCamelCase : Optional[int] = 1 _UpperCamelCase : int = 0 while True: _UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ ) dn += terms_jumped if dn == n - i: break _UpperCamelCase : str = 0 for j in range(len(UpperCAmelCase_ ) ): a_n += digits[j] * 1_0**j return a_n if __name__ == "__main__": print(f'{solution() = }')
648
0
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    # output frames of the text-to-video pipeline
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
290
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # number & -number isolates the lowest set bit; log2 gives its index
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
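# A minimal sketch of the two's-complement trick used above: number & -number
# keeps only the lowest set bit, so log2 of that value is the bit's index.
# The sample values below are arbitrary.
if __name__ == "__main__":
    for value in (1, 12, 40, 96):
        print(value, bin(value), value & -value, lowest_set_bit_index(value))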
290
1
from functools import reduce

N = (
    '''73167176531330624919225119674426574742355349194934'''
    '''96983520312774506326239578318016984801869478851843'''
    '''85861560789112949495459501737958331952853208805511'''
    '''12540698747158523863050715693290963295227443043557'''
    '''66896648950445244523161731856403098711121722383113'''
    '''62229893423380308135336276614282806444486645238749'''
    '''30358907296290491560440772390713810515859307960866'''
    '''70172427121883998797908792274921901699720888093776'''
    '''65727333001053367881220235421809751254540594752243'''
    '''52584907711670556013604839586446706324415722155397'''
    '''53697817977846174064955149290862569321978468622482'''
    '''83972241375657056057490261407972968652414535100474'''
    '''82166370484403199890008895243450658541227588666881'''
    '''16427171479924442928230863465674813919123162824586'''
    '''17866458359124566529476545682848912883142607690042'''
    '''24219022671055626321111109370544217506941658960408'''
    '''07198403850962455444362981230987879927244284909188'''
    '''84580156166097919133875499200524063689912560717606'''
    '''05886116467109405077541002256983155200055935729725'''
    '''71636269561882670428252483600823257530420752963450'''
)


def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"""{solution() = }""")
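# A minimal cross-check sketch: brute_force is a hypothetical helper (not part
# of the original solution) that recomputes the same maximum with plain loops.
def brute_force(n: str = N) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best


if __name__ == "__main__":
    assert brute_force() == solution()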
702
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ =StableDiffusionSAGPipeline lowerCAmelCase__ =TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ =TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ =TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ =TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ =False def UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) snake_case__ : List[str] =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) snake_case__ : List[str] =DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) snake_case__ : int =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : Optional[Any] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case__ : Optional[int] =CLIPTextModel(__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case__ : Optional[int] ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Dict: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): snake_case__ : Optional[int] =torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: snake_case__ : Dict =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] ={ '''prompt''': '''.''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 1.0, '''sag_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Tuple =StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) 
snake_case__ : Any =sag_pipe.to(__SCREAMING_SNAKE_CASE ) sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : str ='''.''' snake_case__ : Tuple =torch.manual_seed(0 ) snake_case__ : Optional[int] =sag_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) snake_case__ : Optional[int] =output.images snake_case__ : Dict =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case__ : Optional[Any] =np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) snake_case__ : Any =sag_pipe.to(__SCREAMING_SNAKE_CASE ) sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : Dict ='''.''' snake_case__ : Union[str, Any] =torch.manual_seed(0 ) snake_case__ : int =sag_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) snake_case__ : List[Any] =output.images snake_case__ : List[str] =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case__ : Union[str, Any] =np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) snake_case__ : Dict =sag_pipe.to(__SCREAMING_SNAKE_CASE ) sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] ='''.''' snake_case__ : str =torch.manual_seed(0 ) snake_case__ : Any =sag_pipe( [prompt] , width=768 , height=512 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , ) snake_case__ : Any =output.images assert image.shape == (1, 512, 768, 3)
408
0
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
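# A minimal usage sketch for casimir_force; the plate area and separation are
# made-up illustrative values (4 cm^2 at 1 micrometre), not physical data.
if __name__ == "__main__":
    print(casimir_force(force=0, area=4e-4, distance=1e-6))   # solves for force
    print(casimir_force(force=2e-7, area=0, distance=1e-6))   # solves for area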
681
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"""{solution() = }""")
290
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } __lowerCAmelCase = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def _lowercase ( a__ : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = {} with open(A__ , "r" ) as file: for line_number, line in enumerate(A__ ): _UpperCamelCase = line.strip() if line: _UpperCamelCase = line.split() _UpperCamelCase = line_number _UpperCamelCase = words[0] _UpperCamelCase = value return result def _lowercase ( a__ : Union[str, Any] , a__ : Tuple , a__ : str , a__ : Union[str, Any] , a__ : Optional[int] ) -> Optional[int]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase = getattr(A__ , A__ ) _UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A__ ): _UpperCamelCase = PARAM_MAPPING[full_name.split("." )[-1]] _UpperCamelCase = "param" if weight_type is not None and weight_type != "param": _UpperCamelCase = getattr(A__ , A__ ).shape elif weight_type is not None and weight_type == "param": _UpperCamelCase = hf_pointer for attribute in hf_param_name.split("." ): _UpperCamelCase = getattr(A__ , A__ ) _UpperCamelCase = shape_pointer.shape # let's reduce dimension _UpperCamelCase = value[0] else: _UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split("." ): _UpperCamelCase = getattr(A__ , A__ ) _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowercase ( a__ : List[str] , a__ : Union[str, Any] , a__ : str , a__ : List[Any] , a__ : int ) -> List[str]: """simple docstring""" _UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A__ ): _UpperCamelCase = PARAM_MAPPING[full_name.split("." )[-1]] _UpperCamelCase = "param" if weight_type is not None and weight_type != "param": _UpperCamelCase = ".".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": _UpperCamelCase = ".".join([key, hf_param_name] ) else: _UpperCamelCase = key _UpperCamelCase = value if "lm_head" in full_key else value[0] __lowerCAmelCase = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def _lowercase ( a__ : Any , a__ : List[Any] , a__ : Any=None , a__ : Union[str, Any]=None ) -> List[str]: """simple docstring""" _UpperCamelCase = False for key, mapped_key in MAPPING.items(): _UpperCamelCase = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(A__ )[0].split("." )[-2] _UpperCamelCase = mapped_key.replace("*" , A__ ) if "weight_g" in name: _UpperCamelCase = "weight_g" elif "weight_v" in name: _UpperCamelCase = "weight_v" elif "bias" in name: _UpperCamelCase = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _UpperCamelCase = "weight" else: _UpperCamelCase = None if hf_dict is not None: rename_dict(A__ , A__ , A__ , A__ , A__ ) else: set_recursively(A__ , A__ , A__ , A__ , A__ ) return is_used return is_used def _lowercase ( a__ : Union[str, Any] , a__ : Tuple , a__ : int ) -> List[str]: """simple docstring""" _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == "group" , ) _UpperCamelCase = True else: _UpperCamelCase = load_wavaveca_layer(A__ , A__ , A__ ) if not is_used: unused_weights.append(A__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _lowercase ( a__ : int , a__ : int , a__ : str , a__ : int , a__ : Union[str, Any] ) -> Tuple: """simple docstring""" _UpperCamelCase = full_name.split("conv_layers." )[-1] _UpperCamelCase = name.split("." 
) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A__ ) @torch.no_grad() def _lowercase ( a__ : List[Any] , a__ : Optional[int] , a__ : Dict=None , a__ : Tuple=None , a__ : List[str]=True , a__ : Optional[int]=False ) -> Optional[int]: """simple docstring""" if config_path is not None: _UpperCamelCase = WavaVecaConfig.from_pretrained(A__ ) else: _UpperCamelCase = WavaVecaConfig() if is_seq_class: _UpperCamelCase = read_txt_into_dict(A__ ) _UpperCamelCase = idalabel _UpperCamelCase = WavaVecaForSequenceClassification(A__ ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , ) feature_extractor.save_pretrained(A__ ) elif is_finetuned: if dict_path: _UpperCamelCase = Dictionary.load(A__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase = target_dict.pad_index _UpperCamelCase = target_dict.bos_index _UpperCamelCase = target_dict.eos_index _UpperCamelCase = len(target_dict.symbols ) _UpperCamelCase = os.path.join(A__ , "vocab.json" ) if not os.path.isdir(A__ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A__ ) ) return os.makedirs(A__ , exist_ok=A__ ) _UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched _UpperCamelCase = 0 _UpperCamelCase = 1 with open(A__ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(A__ , A__ ) _UpperCamelCase = WavaVecaCTCTokenizer( A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A__ , ) _UpperCamelCase = True if config.feat_extract_norm == "layer" else False _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A__ 
, return_attention_mask=A__ , ) _UpperCamelCase = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ ) processor.save_pretrained(A__ ) _UpperCamelCase = WavaVecaForCTC(A__ ) else: _UpperCamelCase = WavaVecaForPreTraining(A__ ) if is_finetuned or is_seq_class: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase = argparse.Namespace(task="audio_pretraining" ) _UpperCamelCase = fairseq.tasks.setup_task(A__ ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__ ) _UpperCamelCase = model[0].eval() recursively_load_weights(A__ , A__ , not is_finetuned ) hf_wavavec.save_pretrained(A__ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
714
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        """simple docstring"""
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
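# A minimal shape-check sketch, assuming the module restored above; the batch
# size and dimensions are hypothetical but consistent with the registered layers.
if __name__ == "__main__":
    model = UnCLIPTextProjModel(
        clip_extra_context_tokens=4,
        clip_embeddings_dim=768,
        time_embed_dim=1536,
        cross_attention_dim=768,
    )
    hidden, time_emb = model(
        image_embeddings=torch.randn(2, 768),
        prompt_embeds=torch.randn(2, 768),
        text_encoder_hidden_states=torch.randn(2, 77, 768),
        do_classifier_free_guidance=False,
    )
    print(hidden.shape, time_emb.shape)  # expected: (2, 81, 768) and (2, 1536)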
589
0
class Graph:
    """simple docstring"""

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, ''' -> ''', ''' -> '''.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=''' ''')

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print('''DFS:''')
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    #  0 1 2 3
493
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
226
0
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset lowerCAmelCase__ = random.Random() def __lowerCamelCase ( __a : Optional[int] , __a : Dict=1.0 , __a : Optional[Any]=None , __a : Any=None ) -> List[Any]: if rng is None: _lowercase =global_rng _lowercase =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _a ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=400 , lowerCAmelCase_=2000 , lowerCAmelCase_=2048 , lowerCAmelCase_=128 , lowerCAmelCase_=1 , lowerCAmelCase_=512 , lowerCAmelCase_=30 , lowerCAmelCase_=44100 , ): _lowercase =parent _lowercase =batch_size _lowercase =min_seq_length _lowercase =max_seq_length _lowercase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _lowercase =spectrogram_length _lowercase =feature_size _lowercase =num_audio_channels _lowercase =hop_length _lowercase =chunk_length _lowercase =sampling_rate def __lowerCAmelCase ( self ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowerCAmelCase ( self , lowerCAmelCase_=False , lowerCAmelCase_=False ): def _flatten(lowerCAmelCase_ ): return list(itertools.chain(*lowerCAmelCase_ ) ) if equal_length: _lowercase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _lowercase =[ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _lowercase =[np.asarray(lowerCAmelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _a ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE = TvltFeatureExtractor def __lowerCAmelCase ( self ): _lowercase =TvltFeatureExtractionTester(self ) def __lowerCAmelCase ( self ): _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , "spectrogram_length" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "feature_size" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "num_audio_channels" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "hop_length" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "chunk_length" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "sampling_rate" ) ) def __lowerCAmelCase ( self ): _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase =feat_extract_first.save_pretrained(lowerCAmelCase_ )[0] check_json_file_has_correct_format(lowerCAmelCase_ ) _lowercase =self.feature_extraction_class.from_pretrained(lowerCAmelCase_ ) _lowercase =feat_extract_first.to_dict() _lowercase =feat_extract_second.to_dict() _lowercase =dict_first.pop("mel_filters" ) _lowercase 
=dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self ): _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase =os.path.join(lowerCAmelCase_ , "feat_extract.json" ) feat_extract_first.to_json_file(lowerCAmelCase_ ) _lowercase =self.feature_extraction_class.from_json_file(lowerCAmelCase_ ) _lowercase =feat_extract_first.to_dict() _lowercase =feat_extract_second.to_dict() _lowercase =dict_first.pop("mel_filters" ) _lowercase =dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self ): # Initialize feature_extractor _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _lowercase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowercase =[np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs] # Test not batched input _lowercase =feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _lowercase =feature_extractor(lowerCAmelCase_ , return_tensors="np" , sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _lowercase =feature_extractor( lowerCAmelCase_ , return_tensors="np" , sampling_rate=44100 , mask_audio=lowerCAmelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_lowercase =[floats_list((1, x) )[0] for x in (800, 800, 800)] _lowercase =np.asarray(lowerCAmelCase_ ) _lowercase =feature_extractor(lowerCAmelCase_ , return_tensors="np" , sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowerCAmelCase ( self , lowerCAmelCase_ ): _lowercase =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _lowercase =ds.sort("id" ).select(range(lowerCAmelCase_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def __lowerCAmelCase ( self ): _lowercase =self._load_datasamples(1 ) _lowercase =TvltFeatureExtractor() _lowercase =feature_extractor(lowerCAmelCase_ , return_tensors="pt" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) _lowercase =torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCAmelCase_ , atol=1e-4 ) )
594
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
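The witness loop above can be sanity-checked with the builtin pow, assuming bin_exp_mod(a, d, n) behaves like pow(a, d, n):

# Witness check for the composite n = 221 = 13 * 17.
n = 221
d, exp = n - 1, 0
while d % 2 == 0:
    d //= 2
    exp += 1  # n - 1 == 55 * 2**2
# 137**55 % 221 == 188 != 1, and repeated squaring never reaches n - 1,
# so a = 137 witnesses that 221 is composite.
print(pow(137, d, n) != 1 and all(pow(137, d * 2**r, n) != n - 1 for r in range(exp)))  # True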
594
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A = logging.get_logger(__name__) _A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _A = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } _A = { "gpt2": 1_024, "gpt2-medium": 1_024, "gpt2-large": 1_024, "gpt2-xl": 1_024, "distilgpt2": 1_024, } class __UpperCAmelCase ( snake_case__ ): """simple docstring""" _snake_case : Optional[Any] = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : List[str] = ['input_ids', 'attention_mask'] _snake_case : List[Any] = GPTaTokenizer def __init__( self : Union[str, Any] , A_ : Union[str, Any]=None , A_ : Optional[int]=None , A_ : Dict=None , A_ : List[Any]="<|endoftext|>" , A_ : Dict="<|endoftext|>" , A_ : List[Any]="<|endoftext|>" , A_ : List[str]=False , **A_ : str , )-> List[Any]: super().__init__( A_ , A_ , tokenizer_file=A_ , unk_token=A_ , bos_token=A_ , eos_token=A_ , add_prefix_space=A_ , **A_ , ) __UpperCamelCase = kwargs.pop("add_bos_token" , A_ ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , A_ ) != add_prefix_space: __UpperCamelCase = getattr(A_ , pre_tok_state.pop("type" ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**A_ ) __UpperCamelCase = add_prefix_space def A ( self : str , *A_ : Tuple , **A_ : int )-> BatchEncoding: __UpperCamelCase = kwargs.get("is_split_into_words" , A_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*A_ , **A_ ) def A ( self : int , *A_ : Optional[int] , **A_ : List[Any] )-> BatchEncoding: __UpperCamelCase = kwargs.get("is_split_into_words" , A_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*A_ , **A_ ) def A ( self : int , A_ : str , A_ : Optional[str] = None )-> Tuple[str]: __UpperCamelCase = self._tokenizer.model.save(A_ , name=A_ ) return tuple(A_ ) def A ( self : Optional[int] , A_ : "Conversation" )-> List[int]: __UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] ) if len(A_ ) > self.model_max_length: __UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
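The add_prefix_space requirement enforced in the encode methods above shows up like this in the public transformers API (a sketch; downloads the gpt2 files on first run):

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# Pre-tokenized input is only valid because add_prefix_space=True was set.
print(tok(["Hello", "world"], is_split_into_words=True)["input_ids"])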
505
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def A ( self : Optional[int] )-> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A ( self : Optional[Any] )-> int: torch.manual_seed(0 ) __UpperCamelCase = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def A ( self : str )-> List[Any]: torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def A ( self : str )-> Optional[Any]: torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) __UpperCamelCase = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def A ( self : Tuple )-> List[str]: __UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) __UpperCamelCase = DDPMScheduler() __UpperCamelCase = AudioDiffusionPipeline(vqvae=A_ , unet=self.dummy_unet , mel=A_ , scheduler=A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 ) __UpperCamelCase = pipe(generator=A_ , steps=4 ) __UpperCamelCase = output.audios[0] __UpperCamelCase = output.images[0] __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 ) __UpperCamelCase = pipe(generator=A_ , steps=4 , return_dict=A_ ) __UpperCamelCase = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] __UpperCamelCase = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 __UpperCamelCase = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) __UpperCamelCase = DDIMScheduler() __UpperCamelCase = self.dummy_vqvae_and_unet __UpperCamelCase = 
AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A_ , scheduler=A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) np.random.seed(0 ) __UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 ) __UpperCamelCase = pipe(raw_audio=A_ , generator=A_ , start_step=5 , steps=10 ) __UpperCamelCase = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __UpperCamelCase = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 __UpperCamelCase = self.dummy_unet_condition __UpperCamelCase = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=A_ , mel=A_ , scheduler=A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) np.random.seed(0 ) __UpperCamelCase = torch.rand((1, 1, 10) ) __UpperCamelCase = pipe(generator=A_ , encoding=A_ ) __UpperCamelCase = output.images[0] __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __UpperCamelCase = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def A ( self : str )-> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Optional[int] )-> Union[str, Any]: __UpperCamelCase = torch_device __UpperCamelCase = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 ) __UpperCamelCase = pipe(generator=A_ ) __UpperCamelCase = output.audios[0] __UpperCamelCase = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __UpperCamelCase = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
505
1
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """Set the supplied objects to None and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function`, halving the injected batch size after every OOM."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
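A minimal usage sketch of the decorator (the training body is a stand-in):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # batch_size is injected by the decorator and halved after every OOM.
    print(f"trying batch_size={batch_size}")

train()  # note: called without the batch_size argument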
617
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
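A hypothetical instantiation, assuming "resnet50" is an available timm model name:

from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.features_only, config.out_indices)  # True (1, 2, 3, 4)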
617
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class _snake_case : # setable values _A : Optional[int] = None _A : Optional[jnp.ndarray] = None _A : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ): return cls() @dataclass class _snake_case ( _a ): _A : jnp.ndarray _A : jnp.ndarray _A : KarrasVeSchedulerState class _snake_case ( _a , _a ): @property def __UpperCamelCase ( self : List[str] ): return True @register_to_config def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : float = 100 ,SCREAMING_SNAKE_CASE__ : float = 1.007 ,SCREAMING_SNAKE_CASE__ : float = 80 ,SCREAMING_SNAKE_CASE__ : float = 0.05 ,SCREAMING_SNAKE_CASE__ : float = 50 ,): pass def __UpperCamelCase ( self : Any ): return KarrasVeSchedulerState.create() def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Tuple = () ): SCREAMING_SNAKE_CASE:int = jnp.arange(0 ,SCREAMING_SNAKE_CASE__ )[::-1].copy() SCREAMING_SNAKE_CASE:Tuple = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=SCREAMING_SNAKE_CASE__ ,schedule=jnp.array(SCREAMING_SNAKE_CASE__ ,dtype=jnp.floataa ) ,timesteps=SCREAMING_SNAKE_CASE__ ,) def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : random.KeyArray ,): if self.config.s_min <= sigma <= self.config.s_max: SCREAMING_SNAKE_CASE:Dict = min(self.config.s_churn / state.num_inference_steps ,2**0.5 - 1 ) else: SCREAMING_SNAKE_CASE:Tuple = 0 # sample eps ~ N(0, S_noise^2 * I) SCREAMING_SNAKE_CASE:Dict = random.split(SCREAMING_SNAKE_CASE__ ,num=1 ) SCREAMING_SNAKE_CASE:Any = self.config.s_noise * random.normal(key=SCREAMING_SNAKE_CASE__ ,shape=sample.shape ) SCREAMING_SNAKE_CASE:Tuple = sigma + gamma * sigma SCREAMING_SNAKE_CASE:Optional[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : bool = True ,): SCREAMING_SNAKE_CASE:Union[str, Any] = sample_hat + sigma_hat * model_output SCREAMING_SNAKE_CASE:str = (sample_hat - pred_original_sample) / sigma_hat SCREAMING_SNAKE_CASE:List[str] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ ,derivative=SCREAMING_SNAKE_CASE__ ,state=SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : jnp.ndarray ,SCREAMING_SNAKE_CASE__ : bool = True ,): SCREAMING_SNAKE_CASE:Optional[Any] = sample_prev + 
sigma_prev * model_output SCREAMING_SNAKE_CASE:str = (sample_prev - pred_original_sample) / sigma_prev SCREAMING_SNAKE_CASE:Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ ,derivative=SCREAMING_SNAKE_CASE__ ,state=SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any ): raise NotImplementedError()
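The sigma schedule built in set_timesteps above, written out with numpy (the config defaults sigma_max=100, sigma_min=0.02 are assumed):

import numpy as np

sigma_max, sigma_min, num_inference_steps = 100.0, 0.02, 50
i = np.arange(num_inference_steps)
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
print(schedule[0], schedule[-1])  # 10000.0 ... 0.0004: geometric decay from sigma_max**2 to sigma_min**2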
143
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
143
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __A = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __A = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = SqueezeBertTokenizer def __init__(self : Any , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]="[UNK]" , UpperCAmelCase_ : int="[SEP]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: Dict =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: Any =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: Union[str, Any] =do_lower_case lowerCamelCase__: Optional[Any] =strip_accents lowerCamelCase__: Optional[int] =tokenize_chinese_chars lowerCamelCase__: int =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: Any =do_lower_case def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=None) ->Dict: '''simple docstring''' lowerCamelCase__: List[str] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def 
SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: Dict =[self.sep_token_id] lowerCamelCase__: Tuple =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: Optional[int] =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
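The pair mask built by the token-type-id method above, spelled out with toy ids (101/102 stand in for [CLS]/[SEP] and are illustrative only):

cls_id, sep_id = 101, 102
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]
mask = [0] * len([cls_id] + token_ids_a + [sep_id]) + [1] * len(token_ids_b + [sep_id])
print(mask)  # [0, 0, 0, 0, 0, 1, 1, 1]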
437
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass action law (n * p = n_i**2) for the missing quantity."""
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
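A quick mass-action-law check, using the function name restored above:

print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
# ('intrinsic_conc', 50.0), since n_i = sqrt(25 * 100)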
437
1
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
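One update step worked by hand (assuming the weight starts at 1.0 and a target of 50):

import math

weight = 1.0
layer_1 = 1 / (1 + math.exp(-0.02 * weight))  # sigmoid(0.02) ~ 0.505
error = (50 / 100) - layer_1                  # ~ -0.005
delta = error * layer_1 * (1 - layer_1)       # sigmoid derivative applied to the activation
weight += 0.02 * delta
print(round(layer_1 * 100, 2))                # ~ 50.5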
655
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
655
1
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask UpperCAmelCase_ : Any = logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : List[Any] = """token-classification""" def __init__( self : Dict , __lowerCamelCase : Any ): if type(__lowerCamelCase ) == dict: UpperCamelCase :Optional[int] = Namespace(**__lowerCamelCase ) UpperCamelCase :Union[str, Any] = import_module("""tasks""" ) try: UpperCamelCase :List[str] = getattr(__lowerCamelCase , hparams.task_type ) UpperCamelCase :TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) UpperCamelCase :Any = self.token_classification_task.get_labels(hparams.labels ) UpperCamelCase :List[str] = CrossEntropyLoss().ignore_index super().__init__(__lowerCamelCase , len(self.labels ) , self.mode ) def _A ( self : str , **__lowerCamelCase : Any ): return self.model(**__lowerCamelCase ) def _A ( self : str , __lowerCamelCase : int , __lowerCamelCase : int ): UpperCamelCase :int = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": UpperCamelCase :Any = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids UpperCamelCase :Optional[Any] = self(**__lowerCamelCase ) UpperCamelCase :Union[str, Any] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def _A ( self : Tuple ): UpperCamelCase :Dict = self.hparams for mode in ["train", "dev", "test"]: UpperCamelCase :str = self._feature_file(__lowerCamelCase ) if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __lowerCamelCase ) UpperCamelCase :Optional[Any] = torch.load(__lowerCamelCase ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) UpperCamelCase :Any = self.token_classification_task.read_examples_from_file(args.data_dir , __lowerCamelCase ) UpperCamelCase :Optional[int] = self.token_classification_task.convert_examples_to_features( __lowerCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowerCamelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , __lowerCamelCase ) torch.save(__lowerCamelCase , __lowerCamelCase ) def _A ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool = False ): UpperCamelCase :List[Any] = self._feature_file(__lowerCamelCase ) 
logger.info("""Loading features from cached file %s""" , __lowerCamelCase ) UpperCamelCase :Any = torch.load(__lowerCamelCase ) UpperCamelCase :List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) UpperCamelCase :Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: UpperCamelCase :Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: UpperCamelCase :Tuple = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) UpperCamelCase :int = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , batch_size=__lowerCamelCase ) def _A ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ): """Compute validation""" "" UpperCamelCase :Optional[int] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": UpperCamelCase :int = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids UpperCamelCase :Dict = self(**__lowerCamelCase ) UpperCamelCase , UpperCamelCase :int = outputs[:2] UpperCamelCase :Union[str, Any] = logits.detach().cpu().numpy() UpperCamelCase :Union[str, Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _A ( self : str , __lowerCamelCase : int ): UpperCamelCase :Dict = torch.stack([x["""val_loss"""] for x in outputs] ).mean() UpperCamelCase :Union[str, Any] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) UpperCamelCase :Union[str, Any] = np.argmax(__lowerCamelCase , axis=2 ) UpperCamelCase :Union[str, Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) UpperCamelCase :Tuple = dict(enumerate(self.labels ) ) UpperCamelCase :Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] UpperCamelCase :Tuple = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) UpperCamelCase :Union[str, Any] = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(__lowerCamelCase , __lowerCamelCase ), """precision""": precision_score(__lowerCamelCase , __lowerCamelCase ), """recall""": recall_score(__lowerCamelCase , __lowerCamelCase ), """f1""": fa_score(__lowerCamelCase , __lowerCamelCase ), } UpperCamelCase :Dict = dict(results.items() ) UpperCamelCase :Tuple = results return ret, preds_list, out_label_list def _A ( self : Optional[Any] , __lowerCamelCase : int ): # when stable UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = self._eval_end(__lowerCamelCase ) UpperCamelCase :Any = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _A ( self : Dict , __lowerCamelCase : Optional[int] ): # updating to test_epoch_end instead of deprecated test_end UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[Any] = self._eval_end(__lowerCamelCase ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 UpperCamelCase :Tuple = ret["""log"""] # 
`val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _A ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ): # Add NER specific options BaseTransformer.add_model_specific_args(__lowerCamelCase , __lowerCamelCase ) parser.add_argument( """--task_type""" , default="""NER""" , type=__lowerCamelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=__lowerCamelCase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=__lowerCamelCase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=__lowerCamelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": UpperCAmelCase_ : int = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) UpperCAmelCase_ : str = NERTransformer.add_model_specific_args(parser, os.getcwd()) UpperCAmelCase_ : Tuple = parser.parse_args() UpperCAmelCase_ : int = NERTransformer(args) UpperCAmelCase_ : int = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 UpperCAmelCase_ : Any = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) UpperCAmelCase_ : Any = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
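The dataloader construction above is plain tensor zipping; a toy equivalent with made-up shapes:

import torch
from torch.utils.data import DataLoader, TensorDataset

ids = torch.randint(0, 100, (8, 16))     # stand-ins for input_ids
mask = torch.ones_like(ids)              # attention_mask
labels = torch.randint(0, 9, (8, 16))    # label_ids
loader = DataLoader(TensorDataset(ids, mask, labels), batch_size=4)
print(next(iter(loader))[0].shape)       # torch.Size([4, 16])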
590
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: """simple docstring""" if isinstance(__magic_name__ , np.ndarray ): return list(tensor.shape ) UpperCamelCase :List[Any] = tf.shape(__magic_name__ ) if tensor.shape == tf.TensorShape(__magic_name__ ): return dynamic UpperCamelCase :Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__magic_name__ )] def SCREAMING_SNAKE_CASE_ ( __magic_name__ : tf.Tensor , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[str] = None ) -> tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1E-9 , axis=__magic_name__ , name=__magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple=1E-5 , __magic_name__ : Optional[Any]=-1 ) -> int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__magic_name__ , __magic_name__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized UpperCamelCase , UpperCamelCase :Tuple = tf.nn.moments(__magic_name__ , axes=[axis] , keepdims=__magic_name__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis UpperCamelCase :str = [1] * inputs.shape.rank UpperCamelCase :int = shape_list(__magic_name__ )[axis] UpperCamelCase :Optional[Any] = tf.reshape(__magic_name__ , __magic_name__ ) UpperCamelCase :Tuple = tf.reshape(__magic_name__ , __magic_name__ ) # Compute layer normalization using the batch_normalization # function. UpperCamelCase :Union[str, Any] = tf.nn.batch_normalization( __magic_name__ , __magic_name__ , __magic_name__ , offset=__magic_name__ , scale=__magic_name__ , variance_epsilon=__magic_name__ , ) return outputs def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] , __magic_name__ : List[Any]=0 , __magic_name__ : Any=-1 ) -> str: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input UpperCamelCase :Optional[Any] = tf.shape(__magic_name__ ) UpperCamelCase :Optional[int] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) UpperCamelCase :int = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(__magic_name__ , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : tf.Tensor ) -> tf.Tensor: """simple docstring""" if not isinstance(__magic_name__ , tf.Tensor ): UpperCamelCase :Union[str, Any] = tf.convert_to_tensor(__magic_name__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: UpperCamelCase :Optional[Any] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: UpperCamelCase :List[str] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) UpperCamelCase :Dict = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def SCREAMING_SNAKE_CASE_ ( __magic_name__ : tf.Tensor , __magic_name__ : int , __magic_name__ : str = "input_ids" ) -> None: """simple docstring""" tf.debugging.assert_less( __magic_name__ , tf.cast(__magic_name__ , dtype=tensor.dtype ) , message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(__magic_name__ )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ) , ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : List[str] ) -> Dict: """simple docstring""" UpperCamelCase :List[Any] = 6_4512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. UpperCamelCase :Dict = [x for x in data if len(__magic_name__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) UpperCamelCase :Tuple = np.asarray(__magic_name__ ) UpperCamelCase :Optional[Any] = 1 UpperCamelCase :Tuple = np.array_split(__magic_name__ , __magic_name__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 UpperCamelCase :Union[str, Any] = np.array_split(__magic_name__ , __magic_name__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__magic_name__ ): UpperCamelCase :List[str] = chunk_data else: UpperCamelCase :Any = data def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> str: """simple docstring""" if name in group.attrs: UpperCamelCase :List[Any] = [n.decode("""utf8""" ) if hasattr(__magic_name__ , """decode""" ) else n for n in group.attrs[name]] else: UpperCamelCase :Tuple = [] UpperCamelCase :int = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(__magic_name__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> Any: """simple docstring""" def _expand_single_ad_tensor(__magic_name__ : List[Any] ): if isinstance(__magic_name__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__magic_name__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , __magic_name__ )
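The epsilon shift in the softmax helper above, demonstrated directly; the tiny offset works around an XLA masking quirk and leaves the distribution effectively unchanged:

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, -1e9]])
print(tf.nn.softmax(logits + 1e-9, axis=-1).numpy())  # masked position gets ~0 probability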
590
1
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowercase ( _lowerCAmelCase , _lowerCAmelCase=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def lowercase ( _lowerCAmelCase , _lowerCAmelCase=0 ): UpperCAmelCase__ = [] for old_item in old_list: UpperCAmelCase__ = old_item.replace("""in_layers.0""" , """norm1""" ) UpperCAmelCase__ = new_item.replace("""in_layers.2""" , """conv1""" ) UpperCAmelCase__ = new_item.replace("""out_layers.0""" , """norm2""" ) UpperCAmelCase__ = new_item.replace("""out_layers.3""" , """conv2""" ) UpperCAmelCase__ = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) UpperCAmelCase__ = new_item.replace("""skip_connection""" , """conv_shortcut""" ) UpperCAmelCase__ = shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def lowercase ( _lowerCAmelCase , _lowerCAmelCase=0 ): UpperCAmelCase__ = [] for old_item in old_list: UpperCAmelCase__ = old_item UpperCAmelCase__ = new_item.replace("""norm.weight""" , """group_norm.weight""" ) UpperCAmelCase__ = new_item.replace("""norm.bias""" , """group_norm.bias""" ) UpperCAmelCase__ = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) UpperCAmelCase__ = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) UpperCAmelCase__ = shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ): assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): UpperCAmelCase__ = old_checkpoint[path] UpperCAmelCase__ = old_tensor.shape[0] // 3 UpperCAmelCase__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) UpperCAmelCase__ = old_tensor.shape[0] // config['num_head_channels'] // 3 UpperCAmelCase__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) UpperCAmelCase__ = old_tensor.split(channels // num_heads , dim=1 ) UpperCAmelCase__ = query.reshape(__UpperCamelCase ) UpperCAmelCase__ = key.reshape(__UpperCamelCase ) UpperCAmelCase__ = value.reshape(__UpperCamelCase ) for path in paths: UpperCAmelCase__ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here UpperCAmelCase__ = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) UpperCAmelCase__ = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) UpperCAmelCase__ = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: UpperCAmelCase__ = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: UpperCAmelCase__ = old_checkpoint[path['old']][:, :, 0] else: UpperCAmelCase__ = old_checkpoint[path['old']] def lowercase ( _lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase__ = {} UpperCAmelCase__ = checkpoint['time_embed.0.weight'] UpperCAmelCase__ = checkpoint['time_embed.0.bias'] UpperCAmelCase__ = checkpoint['time_embed.2.weight'] UpperCAmelCase__ = checkpoint['time_embed.2.bias'] UpperCAmelCase__ = checkpoint['input_blocks.0.0.weight'] UpperCAmelCase__ = checkpoint['input_blocks.0.0.bias'] UpperCAmelCase__ = checkpoint['out.0.weight'] UpperCAmelCase__ = checkpoint['out.0.bias'] UpperCAmelCase__ = checkpoint['out.2.weight'] UpperCAmelCase__ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only UpperCAmelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) UpperCAmelCase__ = { layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only UpperCAmelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) UpperCAmelCase__ = { layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only UpperCAmelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) UpperCAmelCase__ = { layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): UpperCAmelCase__ = (i - 1) // (config['num_res_blocks'] + 1) UpperCAmelCase__ = (i - 1) % (config['num_res_blocks'] + 1) UpperCAmelCase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] UpperCAmelCase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: UpperCAmelCase__ = checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] UpperCAmelCase__ = checkpoint[ F'''input_blocks.{i}.0.op.bias''' ] continue 
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase ) UpperCAmelCase__ = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} UpperCAmelCase__ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): UpperCAmelCase__ = renew_attention_paths(__UpperCamelCase ) UpperCAmelCase__ = { 'old': F'''input_blocks.{i}.1''', 'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } UpperCAmelCase__ = { F'''input_blocks.{i}.1.qkv.bias''': { 'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', 'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', 'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { 'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', 'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', 'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) UpperCAmelCase__ = middle_blocks[0] UpperCAmelCase__ = middle_blocks[1] UpperCAmelCase__ = middle_blocks[2] UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) UpperCAmelCase__ = renew_attention_paths(__UpperCamelCase ) UpperCAmelCase__ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): UpperCAmelCase__ = i // (config['num_res_blocks'] + 1) UpperCAmelCase__ = i % (config['num_res_blocks'] + 1) UpperCAmelCase__ = [shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] UpperCAmelCase__ = {} for layer in output_block_layers: UpperCAmelCase__ = layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: UpperCAmelCase__ = [layer_name] if len(__UpperCamelCase ) > 1: UpperCAmelCase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] UpperCAmelCase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase ) UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase ) UpperCAmelCase__ = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): 
UpperCAmelCase__ = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) UpperCAmelCase__ = checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] UpperCAmelCase__ = checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: UpperCAmelCase__ = [] if len(__UpperCamelCase ): UpperCAmelCase__ = renew_attention_paths(__UpperCamelCase ) UpperCAmelCase__ = { 'old': F'''output_blocks.{i}.1''', 'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } UpperCAmelCase__ = { F'''output_blocks.{i}.1.qkv.bias''': { 'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', 'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', 'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { 'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', 'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', 'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: UpperCAmelCase__ = '.'.join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) UpperCAmelCase__ = '.'.join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) UpperCAmelCase__ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": snake_case__ : Tuple = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') snake_case__ : Dict = parser.parse_args() snake_case__ : Any = torch.load(args.checkpoint_path) with open(args.config_file) as f: snake_case__ : str = json.loads(f.read()) snake_case__ : Dict = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] snake_case__ : Tuple = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: snake_case__ : Union[str, Any] = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) snake_case__ : Union[str, Any] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) snake_case__ : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
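The fused-qkv split performed in assign_to_checkpoint above, on a toy tensor (single head and channels=2 assumed, so the per-head reshape is trivial):

import torch

qkv = torch.arange(12.0).reshape(6, 2)                   # stands in for old_checkpoint[path]
channels = qkv.shape[0] // 3                             # 2
q, k, v = qkv.reshape(1, 6, 2).split(channels, dim=1)    # num_heads = 1
print(q.shape, k.shape, v.shape)                         # three [1, 2, 2] tensors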
392
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
0
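The conversion script in this row maps fused `qkv` attention tensors onto separate query/key/value checkpoint entries. A minimal sketch of that split, assuming the fused tensor stacks q, k, v along the first dimension (the layout the path maps above imply); the `split_qkv` helper is hypothetical and illustrative only, and the real `assign_to_checkpoint` additionally reshapes the 1x1-conv slices:

import torch

def split_qkv(checkpoint: dict, old_key: str, new_prefix: str) -> None:
    # Hypothetical helper: split a fused [q; k; v] tensor into three entries.
    qkv = checkpoint.pop(old_key)
    q, k, v = qkv.chunk(3, dim=0)  # assumes q/k/v stacked along dim 0
    checkpoint[f"{new_prefix}.query.weight"] = q
    checkpoint[f"{new_prefix}.key.weight"] = k
    checkpoint[f"{new_prefix}.value.weight"] = v

# Usage sketch with a 1x1-conv-shaped fused weight (3*C, C, 1, 1).
ckpt = {"middle_block.1.qkv.weight": torch.randn(3 * 64, 64, 1, 1)}
split_qkv(ckpt, "middle_block.1.qkv.weight", "mid_block.attentions.0")
assert ckpt["mid_block.attentions.0.query.weight"].shape[0] == 64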
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def UpperCAmelCase__ ( lowerCAmelCase__ :list[list[float]] ) -> list[list[float]]: '''simple docstring''' lowercase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(lowerCAmelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowercase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements lowercase = [[0.0, 0.0], [0.0, 0.0]] lowercase , lowercase = matrix[1][1], matrix[0][0] lowercase , lowercase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(lowerCAmelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(lowerCAmelCase__ ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowercase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix lowercase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowercase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowercase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowercase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowercase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowercase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowercase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowercase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowercase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowercase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowercase = array(lowerCAmelCase__ ) for i in range(3 ): for j in range(3 ): lowercase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowercase = array(lowerCAmelCase__ ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(lowerCAmelCase__ ) # Calculate the inverse of the matrix return [[float(d(lowerCAmelCase__ ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
197
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ ( lowerCAmelCase__ :list[float] , lowerCAmelCase__ :list[float] ) -> float: '''simple docstring''' lowercase = sorted(numsa + numsa ) lowercase , lowercase = divmod(len(lowerCAmelCase__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] =[float(x) for x in input("""Enter the elements of first array: """).split()] __lowerCAmelCase : List[Any] =[float(x) for x in input("""Enter the elements of second array: """).split()] print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
197
1
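The two functions in this row are easy to sanity-check numerically. A short sketch comparing the Decimal-based inverse against numpy.linalg.inv and exercising the median, assuming the de-obfuscated names `inverse_of_matrix` and `median_of_two_arrays` used above:

import numpy as np

# 2x2 and 3x3 inverses should agree with numpy's LAPACK-backed result.
m2 = [[2.0, 5.0], [1.0, 3.0]]
assert np.allclose(inverse_of_matrix(m2), np.linalg.inv(m2))
m3 = [[2.0, 0.0, 1.0], [1.0, 3.0, 0.0], [0.0, 1.0, 4.0]]
assert np.allclose(np.array(inverse_of_matrix(m3), dtype=float), np.linalg.inv(m3))

# Odd total length picks the middle element; even length averages the two middles.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5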
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowercase_ = NewType('''DataClass''', Any) lowercase_ = NewType('''DataClassType''', Any) def __lowerCAmelCase ( __lowerCamelCase : Union[str, Any] ) -> int: if isinstance(__lowerCamelCase , __lowerCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __lowerCAmelCase ( __lowerCamelCase : list ) -> Callable[[str], Any]: __lowerCAmelCase ={str(__lowerCamelCase ): choice for choice in choices} return lambda __lowerCamelCase : str_to_choice.get(__lowerCamelCase , __lowerCamelCase ) def __lowerCAmelCase ( *, __lowerCamelCase : Union[str, List[str]] = None , __lowerCamelCase : str = None , __lowerCamelCase : Any = dataclasses.MISSING , __lowerCamelCase : Callable[[], Any] = dataclasses.MISSING , __lowerCamelCase : dict = None , **__lowerCamelCase : int , ) -> dataclasses.Field: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __lowerCAmelCase ={} if aliases is not None: __lowerCAmelCase =aliases if help is not None: __lowerCAmelCase =help return dataclasses.field(metadata=__lowerCamelCase , default=__lowerCamelCase , default_factory=__lowerCamelCase , **__lowerCamelCase ) class __a ( SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE = 42 def __init__( self : List[Any] , snake_case_ : Union[DataClassType, Iterable[DataClassType]] , **snake_case_ : Dict)-> List[str]: # To make the default appear when using --help if "formatter_class" not in kwargs: __lowerCAmelCase =ArgumentDefaultsHelpFormatter super().__init__(**snake_case_) if dataclasses.is_dataclass(snake_case_): __lowerCAmelCase =[dataclass_types] __lowerCAmelCase =list(snake_case_) for dtype in self.dataclass_types: self._add_dataclass_arguments(snake_case_) @staticmethod def UpperCamelCase ( snake_case_ : ArgumentParser , snake_case_ : dataclasses.Field)-> str: __lowerCAmelCase =F"""--{field.name}""" __lowerCAmelCase =field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , snake_case_): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""") __lowerCAmelCase =kwargs.pop("""aliases""" , []) if isinstance(snake_case_ , snake_case_): __lowerCAmelCase =[aliases] __lowerCAmelCase =getattr(field.type , """__origin__""" , field.type) if origin_type is Union or (hasattr(snake_case_ , """UnionType""") and isinstance(snake_case_ , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(snake_case_) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" F""" Problem encountered in field '{field.name}'.""") if type(snake_case_) not in field.type.__args__: # filter `str` in Union __lowerCAmelCase =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __lowerCAmelCase =getattr(field.type , """__origin__""" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __lowerCAmelCase =( field.type.__args__[0] if isinstance(snake_case_ , field.type.__args__[1]) else field.type.__args__[1] ) __lowerCAmelCase =getattr(field.type , """__origin__""" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __lowerCAmelCase ={} if origin_type is Literal or (isinstance(field.type , snake_case_) and issubclass(field.type , snake_case_)): if origin_type is Literal: __lowerCAmelCase =field.type.__args__ else: __lowerCAmelCase =[x.value for x in field.type] __lowerCAmelCase =make_choice_type_function(kwargs["""choices"""]) if field.default is not dataclasses.MISSING: __lowerCAmelCase =field.default else: __lowerCAmelCase =True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __lowerCAmelCase =copy(snake_case_) # Hack because type=bool in argparse does not behave as we want. __lowerCAmelCase =string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
__lowerCAmelCase =False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __lowerCAmelCase =default # This tells argparse we accept 0 or 1 value after --field_name __lowerCAmelCase ="""?""" # This is the value that will get picked if we do --field_name (without value) __lowerCAmelCase =True elif isclass(snake_case_) and issubclass(snake_case_ , snake_case_): __lowerCAmelCase =field.type.__args__[0] __lowerCAmelCase ="""+""" if field.default_factory is not dataclasses.MISSING: __lowerCAmelCase =field.default_factory() elif field.default is dataclasses.MISSING: __lowerCAmelCase =True else: __lowerCAmelCase =field.type if field.default is not dataclasses.MISSING: __lowerCAmelCase =field.default elif field.default_factory is not dataclasses.MISSING: __lowerCAmelCase =field.default_factory() else: __lowerCAmelCase =True parser.add_argument(snake_case_ , *snake_case_ , **snake_case_) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __lowerCAmelCase =False parser.add_argument(F"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **snake_case_) def UpperCamelCase ( self : Optional[int] , snake_case_ : DataClassType)-> Dict: if hasattr(snake_case_ , """_argument_group_name"""): __lowerCAmelCase =self.add_argument_group(dtype._argument_group_name) else: __lowerCAmelCase =self try: __lowerCAmelCase =get_type_hints(snake_case_) except NameError: raise RuntimeError( F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(snake_case_): __lowerCAmelCase =""".""".join(map(snake_case_ , sys.version_info[:3])) raise RuntimeError( F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""") from ex raise for field in dataclasses.fields(snake_case_): if not field.init: continue __lowerCAmelCase =type_hints[field.name] self._parse_dataclass_field(snake_case_ , snake_case_) def UpperCamelCase ( self : Optional[Any] , snake_case_ : Dict=None , snake_case_ : Optional[Any]=False , snake_case_ : Optional[int]=True , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , )-> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): __lowerCAmelCase =[] if args_filename: args_files.append(Path(snake_case_)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(""".args""")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __lowerCAmelCase =ArgumentParser() args_file_parser.add_argument(snake_case_ , type=snake_case_ , action="""append""") # Use only remaining args for further parsing (remove the args_file_flag) __lowerCAmelCase , __lowerCAmelCase =args_file_parser.parse_known_args(args=snake_case_) __lowerCAmelCase =vars(snake_case_).get(args_file_flag.lstrip("""-""") , snake_case_) if cmd_args_file_paths: args_files.extend([Path(snake_case_) for p in cmd_args_file_paths]) __lowerCAmelCase =[] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __lowerCAmelCase =file_args + args if args is not None else file_args + sys.argv[1:] __lowerCAmelCase , __lowerCAmelCase =self.parse_known_args(args=snake_case_) __lowerCAmelCase =[] for dtype in self.dataclass_types: __lowerCAmelCase ={f.name for f in dataclasses.fields(snake_case_) if f.init} __lowerCAmelCase ={k: v for k, v in vars(snake_case_).items() if k in keys} for k in keys: delattr(snake_case_ , snake_case_) __lowerCAmelCase =dtype(**snake_case_) outputs.append(snake_case_) if len(namespace.__dict__) > 0: # additional namespace. 
outputs.append(snake_case_) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""") return (*outputs,) def UpperCamelCase ( self : Union[str, Any] , snake_case_ : Dict[str, Any] , snake_case_ : bool = False)-> Tuple[DataClass, ...]: __lowerCAmelCase =set(args.keys()) __lowerCAmelCase =[] for dtype in self.dataclass_types: __lowerCAmelCase ={f.name for f in dataclasses.fields(snake_case_) if f.init} __lowerCAmelCase ={k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) __lowerCAmelCase =dtype(**snake_case_) outputs.append(snake_case_) if not allow_extra_keys and unused_keys: raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(snake_case_)}""") return tuple(snake_case_) def UpperCamelCase ( self : Dict , snake_case_ : str , snake_case_ : bool = False)-> Tuple[DataClass, ...]: with open(Path(snake_case_) , encoding="""utf-8""") as open_json_file: __lowerCAmelCase =json.loads(open_json_file.read()) __lowerCAmelCase =self.parse_dict(snake_case_ , allow_extra_keys=snake_case_) return tuple(snake_case_) def UpperCamelCase ( self : Tuple , snake_case_ : str , snake_case_ : bool = False)-> Tuple[DataClass, ...]: __lowerCAmelCase =self.parse_dict(yaml.safe_load(Path(snake_case_).read_text()) , allow_extra_keys=snake_case_) return tuple(snake_case_)
354
from manim import * class __a ( SCREAMING_SNAKE_CASE ): def UpperCamelCase ( self : Tuple)-> Dict: __lowerCAmelCase =Rectangle(height=0.5 , width=0.5) __lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0) __lowerCAmelCase =[mem.copy() for i in range(6)] __lowerCAmelCase =[mem.copy() for i in range(6)] __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0) __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0) __lowerCAmelCase =VGroup(snake_case_ , snake_case_).arrange(snake_case_ , buff=0) __lowerCAmelCase =Text("""CPU""" , font_size=24) __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_) cpu.move_to([-2.5, -0.5, 0]) self.add(snake_case_) __lowerCAmelCase =[mem.copy() for i in range(1)] __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0) __lowerCAmelCase =Text("""GPU""" , font_size=24) __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_) gpu.align_to(snake_case_ , snake_case_) gpu.set_x(gpu.get_x() - 1) self.add(snake_case_) __lowerCAmelCase =[mem.copy() for i in range(6)] __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0) __lowerCAmelCase =Text("""Model""" , font_size=24) __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_) model.move_to([3, -1.0, 0]) self.play( Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , ) __lowerCAmelCase =MarkupText( F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , ) __lowerCAmelCase =Square(side_length=2.2) key.move_to([-5, 2, 0]) __lowerCAmelCase =MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0]) step_a.move_to([2, 2, 0]) self.play(Write(snake_case_ , run_time=2.5) , Write(snake_case_) , Write(snake_case_)) self.add(snake_case_) __lowerCAmelCase =[] __lowerCAmelCase =[] __lowerCAmelCase =[] for i, rect in enumerate(snake_case_): __lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(snake_case_ , opacity=0.7) cpu_target.move_to(snake_case_) cpu_target.generate_target() __lowerCAmelCase =0.4_6 / 4 __lowerCAmelCase =0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=snake_case_) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0) cpu_targs.append(snake_case_) first_animations.append(rect.animate(run_time=0.5).set_stroke(snake_case_)) second_animations.append(MoveToTarget(snake_case_ , run_time=1.5)) self.play(*snake_case_) self.play(*snake_case_) self.wait()
354
1
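The parser above builds argparse arguments directly from dataclass fields, including the `no_*` complements for default-True booleans. A minimal usage sketch, assuming the public transformers entry point `HfArgumentParser.parse_args_into_dataclasses` that the obfuscated method names correspond to:

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})


parser = HfArgumentParser(TrainArgs)
# With default-False bools, the bare flag sets True (nargs="?", const=True).
(args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--fp16"])
assert args.learning_rate == 1e-4 and args.fp16 is True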
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class lowercase : def __init__( self ) -> Any: """simple docstring""" UpperCamelCase = '' UpperCamelCase = '' UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = 256 UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = 0 def __UpperCamelCase ( self , A_ ) -> Dict: """simple docstring""" UpperCamelCase = cva.imread(A_ , 0 ) UpperCamelCase = copy.deepcopy(self.img ) UpperCamelCase , UpperCamelCase , UpperCamelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' ) UpperCamelCase = np.sum(A_ ) for i in range(len(A_ ) ): UpperCamelCase = x[i] / self.k self.sk += prk UpperCamelCase = (self.L - 1) * self.sk if self.rem != 0: UpperCamelCase = int(last % last ) UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(A_ ) UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size ) UpperCamelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): UpperCamelCase = self.img[j][i] if num != self.last_list[num]: UpperCamelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": _UpperCAmelCase : Any = os.path.join(os.path.basename(__file__), "image_data/input.jpg") _UpperCAmelCase : Optional[int] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
3
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
3
1
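`ConstantStretch` above is a histogram-equalization pass: accumulate the normalized intensity histogram into a CDF, rescale it to the intensity range, and remap every pixel through the resulting lookup table. A compact numpy-only sketch of the same transform (not the class above, just the underlying idea):

import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    """Histogram-equalize a single-channel uint8 image."""
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / img.size                       # normalized cumulative distribution
    lut = np.round((levels - 1) * cdf).astype(np.uint8)  # intensity lookup table
    return lut[img]                                      # remap each pixel through the LUT

img = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)
out = equalize(img)
assert out.shape == img.shape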
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) class snake_case_ ( lowercase__ ): """simple docstring""" def __init__(self: Union[str, Any] , __UpperCAmelCase: str ) -> Dict: '''simple docstring''' super().__init__() __a : Optional[Any] = nn.ModuleList(__UpperCAmelCase ) def UpperCAmelCase__ (self: Dict , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: List[Any] , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: List[Any] , __UpperCAmelCase: Dict = None , __UpperCAmelCase: str = None , __UpperCAmelCase: Dict = None , __UpperCAmelCase: List[Any] = None , __UpperCAmelCase: Optional[int] = False , __UpperCAmelCase: Any = True , ) -> Dict: '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__UpperCAmelCase , __UpperCAmelCase , self.nets ) ): __a , __a : Tuple = controlnet( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) # merge samples if i == 0: __a , __a : int = down_samples, mid_sample else: __a : Union[str, Any] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__UpperCAmelCase , __UpperCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCAmelCase__ (self: Optional[Any] , __UpperCAmelCase: Dict , __UpperCAmelCase: List[str] = True , __UpperCAmelCase: List[str] = None , __UpperCAmelCase: Any = False , __UpperCAmelCase: Optional[Any] = None , ) -> Dict: '''simple docstring''' __a : Optional[int] = 0 __a : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __UpperCAmelCase , is_main_process=__UpperCAmelCase , save_function=__UpperCAmelCase , safe_serialization=__UpperCAmelCase , variant=__UpperCAmelCase , ) idx += 1 __a : Dict = model_path_to_save + f'_{idx}' @classmethod def UpperCAmelCase__ (cls: int , __UpperCAmelCase: List[Any] , **__UpperCAmelCase: Optional[Any] ) -> Dict: '''simple docstring''' __a : Any = 0 __a : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... __a : Tuple = pretrained_model_path while os.path.isdir(__UpperCAmelCase ): __a : Any = ControlNetModel.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) controlnets.append(__UpperCAmelCase ) idx += 1 __a : Optional[int] = pretrained_model_path + f'_{idx}' logger.info(f'{len(__UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.' ) if len(__UpperCAmelCase ) == 0: raise ValueError( f'No ControlNets found under {os.path.dirname(__UpperCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.' ) return cls(__UpperCAmelCase )
351
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
579
0
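`MultiControlNetModel.forward` above merges the outputs of several ControlNets by elementwise-summing their residuals, keeping the first net's tensors and adding each subsequent net's into them. The merge reduces to the pattern below, shown with plain tensors rather than the diffusers API:

import torch

def merge_residuals(per_net_down: list[list[torch.Tensor]], per_net_mid: list[torch.Tensor]):
    # Start from the first net's residuals, then accumulate the rest elementwise.
    down, mid = per_net_down[0], per_net_mid[0]
    for d, m in zip(per_net_down[1:], per_net_mid[1:]):
        down = [prev + cur for prev, cur in zip(down, d)]
        mid = mid + m
    return down, mid

nets = 3
down_lists = [[torch.ones(1, 4, 8, 8)] for _ in range(nets)]
mids = [torch.ones(1, 4, 4, 4) for _ in range(nets)]
down, mid = merge_residuals(down_lists, mids)
assert torch.allclose(mid, 3 * torch.ones(1, 4, 4, 4))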
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class A ( lowerCamelCase_ ): _SCREAMING_SNAKE_CASE : torch.FloatTensor class A ( lowerCamelCase_ , lowerCamelCase_ ): @register_to_config def __init__( self : Optional[Any] , __UpperCAmelCase : int = 16 , __UpperCAmelCase : int = 88 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : int = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : int = 32 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : str = "geglu" , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , ) -> str: """simple docstring""" super().__init__() UpperCamelCase_ = num_attention_heads UpperCamelCase_ = attention_head_dim UpperCamelCase_ = num_attention_heads * attention_head_dim UpperCamelCase_ = in_channels UpperCamelCase_ = torch.nn.GroupNorm(num_groups=__UpperCAmelCase , num_channels=__UpperCAmelCase , eps=1E-6 , affine=__UpperCAmelCase ) UpperCamelCase_ = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) # 3. Define transformers blocks UpperCamelCase_ = nn.ModuleList( [ BasicTransformerBlock( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dropout=__UpperCAmelCase , cross_attention_dim=__UpperCAmelCase , activation_fn=__UpperCAmelCase , attention_bias=__UpperCAmelCase , double_self_attention=__UpperCAmelCase , norm_elementwise_affine=__UpperCAmelCase , ) for d in range(__UpperCAmelCase ) ] ) UpperCamelCase_ = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) def lowercase__ ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=1 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : bool = True , ) -> List[Any]: """simple docstring""" UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = hidden_states.shape UpperCamelCase_ = batch_frames // num_frames UpperCamelCase_ = hidden_states UpperCamelCase_ = hidden_states[None, :].reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) UpperCamelCase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) UpperCamelCase_ = self.norm(__UpperCAmelCase ) UpperCamelCase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __UpperCAmelCase , __UpperCAmelCase ) UpperCamelCase_ = self.proj_in(__UpperCAmelCase ) # 2. Blocks for block in self.transformer_blocks: UpperCamelCase_ = block( __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase , cross_attention_kwargs=__UpperCAmelCase , class_labels=__UpperCAmelCase , ) # 3. Output UpperCamelCase_ = self.proj_out(__UpperCAmelCase ) UpperCamelCase_ = ( hidden_states[None, None, :] .reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) UpperCamelCase_ = hidden_states.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) UpperCamelCase_ = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=__UpperCAmelCase )
706
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __a : Union[str, Any] = logging.get_logger(__name__) # General docstring __a : List[Any] = """MobileNetV1Config""" # Base docstring __a : List[Any] = """google/mobilenet_v1_1.0_224""" __a : Optional[Any] = [1, 10_24, 7, 7] # Image classification docstring __a : Optional[int] = """google/mobilenet_v1_1.0_224""" __a : str = """tabby, tabby cat""" __a : List[str] = [ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def a_ ( __snake_case , __snake_case , __snake_case=None ) -> Any: '''simple docstring''' UpperCamelCase_ = {} if isinstance(__snake_case , __snake_case ): UpperCamelCase_ = model.mobilenet_va else: UpperCamelCase_ = model UpperCamelCase_ = 'MobilenetV1/Conv2d_0/' UpperCamelCase_ = backbone.conv_stem.convolution.weight UpperCamelCase_ = backbone.conv_stem.normalization.bias UpperCamelCase_ = backbone.conv_stem.normalization.weight UpperCamelCase_ = backbone.conv_stem.normalization.running_mean UpperCamelCase_ = backbone.conv_stem.normalization.running_var for i in range(1_3 ): UpperCamelCase_ = i + 1 UpperCamelCase_ = i * 2 UpperCamelCase_ = backbone.layer[pt_index] UpperCamelCase_ = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' UpperCamelCase_ = pointer.convolution.weight UpperCamelCase_ = pointer.normalization.bias UpperCamelCase_ = pointer.normalization.weight UpperCamelCase_ = pointer.normalization.running_mean UpperCamelCase_ = pointer.normalization.running_var UpperCamelCase_ = backbone.layer[pt_index + 1] UpperCamelCase_ = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' UpperCamelCase_ = pointer.convolution.weight UpperCamelCase_ = pointer.normalization.bias UpperCamelCase_ = pointer.normalization.weight UpperCamelCase_ = pointer.normalization.running_mean UpperCamelCase_ = pointer.normalization.running_var if isinstance(__snake_case , __snake_case ): UpperCamelCase_ = 'MobilenetV1/Logits/Conv2d_1c_1x1/' UpperCamelCase_ = model.classifier.weight UpperCamelCase_ = model.classifier.bias return tf_to_pt_map def a_ ( __snake_case , __snake_case , __snake_case ) -> Optional[int]: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model UpperCamelCase_ = tf.train.list_variables(__snake_case ) UpperCamelCase_ = {} for name, shape in init_vars: logger.info(F'''Loading TF weight {name} with shape {shape}''' ) UpperCamelCase_ = tf.train.load_variable(__snake_case , __snake_case ) UpperCamelCase_ = array # Build TF to PyTorch weights loading map UpperCamelCase_ = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case ) for name, pointer in tf_to_pt_map.items(): logger.info(F'''Importing {name}''' ) if name not in tf_weights: logger.info(F'''{name} not in tf pre-trained weights, skipping''' ) continue UpperCamelCase_ = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) UpperCamelCase_ = np.transpose(__snake_case , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer UpperCamelCase_ = array.squeeze().transpose() else: UpperCamelCase_ = np.transpose(__snake_case , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' ) UpperCamelCase_ = torch.from_numpy(__snake_case ) tf_weights.pop(__snake_case , __snake_case ) tf_weights.pop(name + '/RMSProp' , __snake_case ) tf_weights.pop(name + '/RMSProp_1' , __snake_case ) tf_weights.pop(name + '/ExponentialMovingAverage' , __snake_case ) logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' ) return model def a_ ( __snake_case , __snake_case ) -> torch.Tensor: '''simple docstring''' UpperCamelCase_ , UpperCamelCase_ = features.shape[-2:] UpperCamelCase_ , UpperCamelCase_ = conv_layer.stride UpperCamelCase_ , UpperCamelCase_ = conv_layer.kernel_size if in_height % stride_height == 0: UpperCamelCase_ = max(kernel_height - stride_height , 0 ) else: UpperCamelCase_ = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: UpperCamelCase_ = max(kernel_width - stride_width , 0 ) else: UpperCamelCase_ = max(kernel_width - (in_width % stride_width) , 0 ) UpperCamelCase_ = pad_along_width // 2 UpperCamelCase_ = pad_along_width - pad_left UpperCamelCase_ = pad_along_height // 2 UpperCamelCase_ = pad_along_height - pad_top UpperCamelCase_ = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__snake_case , __snake_case , 'constant' , 0.0 ) class A ( nn.Module ): def __init__( self : Any , __UpperCAmelCase : MobileNetVaConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[bool or str] = True , ) -> None: """simple docstring""" super().__init__() UpperCamelCase_ = config if in_channels % groups != 0: raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) UpperCamelCase_ = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) UpperCamelCase_ = nn.Convad( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=__UpperCAmelCase , groups=__UpperCAmelCase , bias=__UpperCAmelCase , padding_mode='zeros' , ) if use_normalization: UpperCamelCase_ = nn.BatchNormad( 
num_features=__UpperCAmelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=__UpperCAmelCase , track_running_stats=__UpperCAmelCase , ) else: UpperCamelCase_ = None if use_activation: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCamelCase_ = ACTaFN[use_activation] elif isinstance(config.hidden_act , __UpperCAmelCase ): UpperCamelCase_ = ACTaFN[config.hidden_act] else: UpperCamelCase_ = config.hidden_act else: UpperCamelCase_ = None def lowercase__ ( self : Tuple , __UpperCAmelCase : torch.Tensor ) -> torch.Tensor: """simple docstring""" if self.config.tf_padding: UpperCamelCase_ = apply_tf_padding(__UpperCAmelCase , self.convolution ) UpperCamelCase_ = self.convolution(__UpperCAmelCase ) if self.normalization is not None: UpperCamelCase_ = self.normalization(__UpperCAmelCase ) if self.activation is not None: UpperCamelCase_ = self.activation(__UpperCAmelCase ) return features class A ( lowerCamelCase_ ): _SCREAMING_SNAKE_CASE : Tuple = MobileNetVaConfig _SCREAMING_SNAKE_CASE : List[Any] = load_tf_weights_in_mobilenet_va _SCREAMING_SNAKE_CASE : Dict = '''mobilenet_v1''' _SCREAMING_SNAKE_CASE : Tuple = '''pixel_values''' _SCREAMING_SNAKE_CASE : Tuple = False def lowercase__ ( self : List[str] , __UpperCAmelCase : Union[nn.Linear, nn.Convad] ) -> None: """simple docstring""" if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__UpperCAmelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __a : Any = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ __a : Optional[Any] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , lowerCamelCase_ , ) class A ( lowerCamelCase_ ): def __init__( self : Dict , __UpperCAmelCase : MobileNetVaConfig , __UpperCAmelCase : bool = True ) -> Optional[int]: """simple docstring""" super().__init__(__UpperCAmelCase ) UpperCamelCase_ = config UpperCamelCase_ = 32 UpperCamelCase_ = max(int(depth * config.depth_multiplier ) , config.min_depth ) UpperCamelCase_ = MobileNetVaConvLayer( __UpperCAmelCase , in_channels=config.num_channels , out_channels=__UpperCAmelCase , kernel_size=3 , stride=2 , ) UpperCamelCase_ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] UpperCamelCase_ = nn.ModuleList() for i in range(13 ): UpperCamelCase_ = out_channels if strides[i] == 2 or i == 0: depth *= 2 UpperCamelCase_ = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=3 , stride=strides[i] , groups=__UpperCAmelCase , ) ) self.layer.append( MobileNetVaConvLayer( __UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=1 , ) ) UpperCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowercase__ ( self : Dict , __UpperCAmelCase : Tuple ) -> Dict: """simple docstring""" raise NotImplementedError @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowercase__ ( self : List[str] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: """simple docstring""" UpperCamelCase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) UpperCamelCase_ = self.conv_stem(__UpperCAmelCase ) UpperCamelCase_ = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): UpperCamelCase_ = layer_module(__UpperCAmelCase ) if output_hidden_states: UpperCamelCase_ = all_hidden_states + (hidden_states,) UpperCamelCase_ = hidden_states if self.pooler is not None: UpperCamelCase_ = torch.flatten(self.pooler(__UpperCAmelCase ) , start_dim=1 ) else: UpperCamelCase_ = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=__UpperCAmelCase , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowerCamelCase_ , ) class A ( lowerCamelCase_ ): def __init__( self : Union[str, Any] , __UpperCAmelCase : MobileNetVaConfig ) -> None: """simple docstring""" super().__init__(__UpperCAmelCase ) UpperCamelCase_ = config.num_labels UpperCamelCase_ = MobileNetVaModel(__UpperCAmelCase ) UpperCamelCase_ = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head UpperCamelCase_ = nn.Dropout(config.classifier_dropout_prob , inplace=__UpperCAmelCase ) UpperCamelCase_ = nn.Linear(__UpperCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowercase__ ( self : Any , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase_ = self.mobilenet_va(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase ) UpperCamelCase_ = outputs.pooler_output if return_dict else outputs[1] UpperCamelCase_ = self.classifier(self.dropout(__UpperCAmelCase ) ) UpperCamelCase_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCamelCase_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCamelCase_ = 'single_label_classification' else: UpperCamelCase_ = 'multi_label_classification' if self.config.problem_type == "regression": UpperCamelCase_ = MSELoss() if self.num_labels == 1: UpperCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCamelCase_ = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": UpperCamelCase_ = CrossEntropyLoss() UpperCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCamelCase_ = BCEWithLogitsLoss() UpperCamelCase_ = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) if not return_dict: UpperCamelCase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states , )
559
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings, weighted by the attention mask,
        # then project into the shared image/text embedding space.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
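# A hedged usage sketch for MultilingualCLIP above; the checkpoint name is an
# assumption (one of the published M-CLIP checkpoints) and may differ.
#
# from transformers import AutoTokenizer
#
# model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
# tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#
# batch = tokenizer(["a photo of a cat", "une photo d'un chat"], padding=True, return_tensors="pt")
# projected, pooled = model(batch["input_ids"], batch["attention_mask"])
# print(projected.shape)  # (batch_size, numDims), e.g. (2, 768)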
132
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() _snake_case = { '''bart''': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''bert''': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-base-cased-finetuned-mrpc''': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''dpr''': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''gpt2''': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlnet''': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm''': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm-roberta''': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''transfo-xl''': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''openai-gpt''': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''roberta''': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''layoutlm''': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''roberta-large-mnli''': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''camembert''': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''flaubert''': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert''': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert-base-distilled-squad''': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert''': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert-visual-feature-encoder''': ( LxmertConfig, 
TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''ctrl''': ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''albert''': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''t5''': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''electra''': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''wav2vec2''': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=False , _lowercase=True ) -> List[Any]: if model_type not in MODEL_CLASSES: raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: UpperCamelCase = cached_file(_lowercase , _lowercase , force_download=not use_cached_models ) UpperCamelCase = config_class.from_json_file(_lowercase ) UpperCamelCase = True UpperCamelCase = True print(F'Building TensorFlow model from configuration: {config}' ) UpperCamelCase = model_class(_lowercase ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): UpperCamelCase = cached_file( _lowercase , _lowercase , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: UpperCamelCase = load_pytorch_checkpoint_in_tfa_model(_lowercase , _lowercase ) if compare_with_pt_model: UpperCamelCase = tf_model(tf_model.dummy_inputs , training=_lowercase ) # build the network UpperCamelCase = torch.load(_lowercase , map_location='cpu' ) UpperCamelCase = pt_model_class.from_pretrained( pretrained_model_name_or_path=_lowercase , config=_lowercase , state_dict=_lowercase ) with torch.no_grad(): UpperCamelCase = pt_model(**pt_model.dummy_inputs ) UpperCamelCase = pto[0].numpy() UpperCamelCase = tfo[0].numpy() UpperCamelCase = np.amax(np.abs(np_pt - np_tf ) ) print(F'Max absolute difference between models outputs {diff}' ) assert diff <= 2e-2, F'Error, model absolute difference is >2e-2: {diff}' # Save pytorch-model print(F'Save TensorFlow model to {tf_dump_path}' ) tf_model.save_weights(_lowercase , save_format='h5' ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=False , _lowercase=False , ) -> int: if args_model_type is None: UpperCamelCase = list(MODEL_CLASSES.keys() ) else: UpperCamelCase = [args_model_type] for j, model_type in enumerate(_lowercase , start=1 ): print('=' * 100 ) print(F' Converting model type {j}/{len(_lowercase )}: {model_type}' ) print('=' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' 
) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: UpperCamelCase = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: UpperCamelCase = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(_lowercase , _lowercase ) , start=1 ): print('-' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F' Skipping finetuned checkpoint {model_shortcut_name}' ) continue UpperCamelCase = model_shortcut_name elif only_convert_finetuned_models: print(F' Skipping not finetuned checkpoint {model_shortcut_name}' ) continue print( F' Converting checkpoint {i}/{len(_lowercase )}: {model_shortcut_name} - model_type {model_type}' ) print('-' * 100 ) if config_shortcut_name in aws_config_map: UpperCamelCase = cached_file(_lowercase , _lowercase , force_download=not use_cached_models ) else: UpperCamelCase = config_shortcut_name if model_shortcut_name in aws_model_maps: UpperCamelCase = cached_file(_lowercase , _lowercase , force_download=not use_cached_models ) else: UpperCamelCase = model_shortcut_name if os.path.isfile(_lowercase ): UpperCamelCase = 'converted_model' convert_pt_checkpoint_to_tf( model_type=_lowercase , pytorch_checkpoint_path=_lowercase , config_file=_lowercase , tf_dump_path=os.path.join(_lowercase , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=_lowercase , ) if remove_cached_files: os.remove(_lowercase ) os.remove(_lowercase ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.''' ) parser.add_argument( '''--model_type''', default=None, type=str, help=( F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and " '''convert all the models from AWS.''' ), ) parser.add_argument( '''--pytorch_checkpoint_path''', default=None, type=str, help=( '''Path to the PyTorch checkpoint path or shortcut name to download from AWS. ''' '''If not given, will download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--config_file''', default=None, type=str, help=( '''The config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture. 
If not given and ''' '''--pytorch_checkpoint_path is not given or is a shortcut name ''' '''use the configuration associated to the shortcut name on the AWS''' ), ) parser.add_argument( '''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.''' ) parser.add_argument( '''--use_cached_models''', action='''store_true''', help='''Use cached models if possible instead of updating to latest checkpoint versions.''', ) parser.add_argument( '''--remove_cached_files''', action='''store_true''', help='''Remove pytorch models after conversion (save memory when converting in batches).''', ) parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''') _snake_case = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
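# Illustrative invocations of this conversion script (the script filename and
# all paths are assumptions; the flags are the ones defined by the argparse
# block above):
#
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dump \
#       --model_type bert --pytorch_checkpoint_path bert-base-uncased \
#       --compare_with_pt_model
#
# Omitting --model_type converts every architecture in MODEL_CLASSES, which
# downloads each checkpoint; --remove_cached_files keeps disk usage bounded
# when converting in batches.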
282
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
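# A sketch of what the lazy structure above buys you: importing the package is
# cheap because the modeling submodules (and their torch/TF dependencies) are
# only loaded on first attribute access, while the configuration is always light.
#
# from transformers import GroupViTConfig  # resolved via the lazy module
#
# config = GroupViTConfig()  # no framework import needed for configs
# print(config.model_type)   # "groupvit"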
356
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings A : Tuple = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"


@add_start_docstrings(A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
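# A hedged sketch of composing a RagConfig from two sub-configs via the
# classmethod above; the checkpoint names are assumptions.
#
# from transformers import AutoConfig
#
# question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# generator_config = AutoConfig.from_pretrained("facebook/bart-large")
# rag_config = RagConfig.from_question_encoder_generator_configs(
#     question_encoder_config, generator_config, n_docs=5, index_name="compressed"
# )
# print(rag_config.generator.model_type)  # "bart"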
356
1
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
40
"""Save a randomly initialized version of a model, reusing an existing config and tokenizer."""
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
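# fire.Fire maps the function signature onto the command line, so a hypothetical
# invocation (the script filename and the --d_model override are illustrative
# assumptions, not taken from this file) looks like:
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model=64
#
# Positional arguments fill config_name and save_dir; extra flags become
# config_kwargs that override fields of the downloaded config before the
# randomly initialized weights are created.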
161
0
"""Tests for the file caching utilities in `transformers.utils`."""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
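# The API exercised by the tests above, in one line; a sketch assuming network
# access and the same tiny test repository:
#
# from transformers.utils import cached_file
#
# resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
# print(resolved)  # a local path inside the HF cache, under .../snapshots/<commit>/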
710
"""Fetch the current stock price for a ticker symbol from Yahoo Finance."""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # The quoted price lives in a <span> inside a div with this (fragile) CSS class
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
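# Scraping note: Yahoo's markup and bot protection change frequently, so the CSS
# class above is brittle. A sketch of a slightly more defensive request (the
# User-Agent value is an arbitrary browser-like string, not required by any API):
#
# import requests
#
# response = requests.get(
#     "https://in.finance.yahoo.com/quote/AAPL?s=AAPL",
#     headers={"User-Agent": "Mozilla/5.0"},
#     timeout=10,
# )
# response.raise_for_status()  # fail loudly instead of parsing an error page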
654
0
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
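# Quick sanity check for the two implementations above: the distance between
# [1, 2, 3] and [4, 5, 6] is sqrt(3**2 + 3**2 + 3**2) = sqrt(27), roughly 5.196.
#
# assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9
# assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9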
234
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
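# A hedged end-to-end sketch of the pipeline above; the model name is an
# assumption (a public image-captioning checkpoint) and may differ.
#
# from transformers import pipeline
#
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# # [{'generated_text': '<a short caption for the image>'}]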
484
0
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of the numbers in `nums`."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
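# Worked example for average_absolute_deviation above: the mean of [4, 1, 3, 2]
# is 2.5, the absolute deviations are 1.5, 1.5, 0.5, 0.5 (sum 4.0), and the
# average of those deviations is 4.0 / 4 = 1.0.
#
# assert average_absolute_deviation([4, 1, 3, 2]) == 1.0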
368
'''simple docstring''' import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="shi-labs/oneformer_demo" ): '''simple docstring''' with open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) as f: _snake_case = json.load(SCREAMING_SNAKE_CASE__ ) _snake_case = {} _snake_case = [] _snake_case = [] for key, info in class_info.items(): _snake_case = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(SCREAMING_SNAKE_CASE__ ) ) _snake_case = thing_ids _snake_case = class_names return metadata class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=10 , lowerCamelCase=False , lowerCamelCase=255 , lowerCamelCase="shi-labs/oneformer_demo" , lowerCamelCase="ade20k_panoptic.json" , lowerCamelCase=10 , ): _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = min_resolution _snake_case = max_resolution _snake_case = do_resize _snake_case = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size _snake_case = do_normalize _snake_case = image_mean _snake_case = image_std _snake_case = class_info_file _snake_case = prepare_metadata(lowerCamelCase , lowerCamelCase ) _snake_case = num_text _snake_case = repo_path # for the post_process_functions _snake_case = 2 _snake_case = 10 _snake_case = 10 _snake_case = 3 _snake_case = 4 _snake_case = num_labels _snake_case = do_reduce_labels _snake_case = ignore_index def UpperCamelCase( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def UpperCamelCase( self , lowerCamelCase , lowerCamelCase=False ): if not batched: _snake_case = image_inputs[0] if isinstance(lowerCamelCase , Image.Image ): _snake_case , _snake_case = image.size else: _snake_case , _snake_case = image.shape[1], image.shape[2] if w < h: _snake_case = int(self.size["shortest_edge"] * h / w ) _snake_case = self.size["shortest_edge"] elif w > h: _snake_case = self.size["shortest_edge"] _snake_case = int(self.size["shortest_edge"] * w / h ) else: _snake_case = self.size["shortest_edge"] _snake_case = self.size["shortest_edge"] else: _snake_case = [] for image in image_inputs: _snake_case , _snake_case = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _snake_case = 
max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0] _snake_case = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1] return expected_height, expected_width def UpperCamelCase( self ): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string UpperCAmelCase__ : Tuple = image_processing_class def UpperCamelCase( self ): _snake_case = OneFormerImageProcessorTester(self ) @property def UpperCamelCase( self ): return self.image_processing_tester.prepare_image_processor_dict() def UpperCamelCase( self ): _snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase , "size" ) ) self.assertTrue(hasattr(lowerCamelCase , "ignore_index" ) ) self.assertTrue(hasattr(lowerCamelCase , "class_info_file" ) ) self.assertTrue(hasattr(lowerCamelCase , "num_text" ) ) self.assertTrue(hasattr(lowerCamelCase , "repo_path" ) ) self.assertTrue(hasattr(lowerCamelCase , "metadata" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_reduce_labels" ) ) def UpperCamelCase( self ): pass def UpperCamelCase( self ): # Initialize image_processor _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input _snake_case = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values _snake_case , _snake_case = self.image_processing_tester.get_expected_values(lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case , _snake_case = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase ) _snake_case = image_processor( lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase( self ): # Initialize image_processor _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input _snake_case = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values _snake_case , _snake_case = self.image_processing_tester.get_expected_values(lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, 
self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case , _snake_case = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase ) _snake_case = image_processor( lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase( self ): # Initialize image_processor _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input _snake_case = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values _snake_case , _snake_case = self.image_processing_tester.get_expected_values(lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case , _snake_case = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase ) _snake_case = image_processor( lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase( self , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase="np" ): _snake_case = self.image_processing_class(**self.image_processor_dict ) # prepare image and target _snake_case = self.image_processing_tester.num_labels _snake_case = None _snake_case = None _snake_case = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase ) if with_segmentation_maps: _snake_case = num_labels if is_instance_map: _snake_case = list(range(lowerCamelCase ) ) * 2 _snake_case = dict(enumerate(lowerCamelCase ) ) _snake_case = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": _snake_case = [Image.fromarray(lowerCamelCase ) for annotation in annotations] _snake_case = image_processor( lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , lowerCamelCase , return_tensors="pt" , instance_id_to_semantic_id=lowerCamelCase , pad_and_return_pixel_mask=lowerCamelCase , ) return inputs def UpperCamelCase( self ): pass def UpperCamelCase( self ): def common(lowerCamelCase=False , lowerCamelCase=None ): _snake_case = self.comm_get_image_processor_inputs( with_segmentation_maps=lowerCamelCase , is_instance_map=lowerCamelCase , segmentation_type=lowerCamelCase ) _snake_case = inputs["mask_labels"] _snake_case = inputs["class_labels"] _snake_case = inputs["pixel_values"] _snake_case = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.num_text ) common() common(is_instance_map=lowerCamelCase ) common(is_instance_map=lowerCamelCase , segmentation_type="pil" ) 
common(is_instance_map=lowerCamelCase , segmentation_type="pil" ) def UpperCamelCase( self ): _snake_case = np.zeros((20, 50) ) _snake_case = 1 _snake_case = 1 _snake_case = 1 _snake_case = binary_mask_to_rle(lowerCamelCase ) self.assertEqual(len(lowerCamelCase ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def UpperCamelCase( self ): _snake_case = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) _snake_case = self.image_processing_tester.get_fake_oneformer_outputs() _snake_case = fature_extractor.post_process_semantic_segmentation(lowerCamelCase ) self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) _snake_case = [(1, 4) for i in range(self.image_processing_tester.batch_size )] _snake_case = fature_extractor.post_process_semantic_segmentation(lowerCamelCase , target_sizes=lowerCamelCase ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def UpperCamelCase( self ): _snake_case = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) _snake_case = self.image_processing_tester.get_fake_oneformer_outputs() _snake_case = image_processor.post_process_instance_segmentation(lowerCamelCase , threshold=0 ) self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , lowerCamelCase ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def UpperCamelCase( self ): _snake_case = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) _snake_case = self.image_processing_tester.get_fake_oneformer_outputs() _snake_case = image_processor.post_process_panoptic_segmentation(lowerCamelCase , threshold=0 ) self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , lowerCamelCase ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
368
1