| code (string, length 82 to 54.1k) | code_codestyle (int64, 0 to 699) | style_context (string, length 111 to 35.6k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            "def function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\n"
            "class FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised for an unknown stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring"""
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
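# Usage sketch for the helpers above; the environment variable names below are
# made-up examples, not variables any library actually reads.
os.environ["MY_APP_DEBUG"] = "yes"
os.environ["MY_APP_WORKERS"] = "4"

assert parse_flag_from_env("MY_APP_DEBUG") is True
assert get_int_from_env(["MY_APP_WORKERS", "NPROC"], 1) == 4
assert parse_choice_from_env("MY_APP_MODE") == "no"  # falls back to the default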
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
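# Minimal usage sketch of the lazily exported classes; "microsoft/biogpt" is the
# public BioGPT checkpoint on the Hugging Face Hub.
# from transformers import BioGptForCausalLM, BioGptTokenizer
# tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
# model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")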
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
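# Worked example (follows directly from the table above): converting
# kilowatt-hours to joules scales by 3_600_000 / 1.0.
assert energy_conversion("kilowatthour", "joule", 5) == 18_000_000.0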
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
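# Minimal usage sketch of the helpers above; the list comprehension is a
# placeholder workload, not part of the original API.
if __name__ == "__main__":
    start = start_measure()
    busy_work = [i * i for i in range(1_000_000)]  # the code being profiled
    log_measures(end_measure(start), "busy work")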
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)

_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]

_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]

_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
__all__ = [
    'DownloadConfig',
    'DownloadManager',
    'DownloadMode',
    'StreamingDownloadManager',
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aatype: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
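# For reference: the tuple above has 33 entries, beginning with the special
# tokens ("<cls>", "<pad>", "<eos>", "<unk>") and ending with "<mask>", i.e.
# len(get_default_vocab_list()) == 33.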
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term of the
    digit-addition sequence or the smallest term for which c > 10**k,
    when the terms are written in the form a(i) = b * 10**k + c.
    Returns the accumulated difference and the number of terms jumped.
    """
    # ds_b -> digitsum(b); c -> the lower k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """Computes terms of the sequence one at a time, updating a_i in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit list starting at index k, propagating carries."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns the n-th term of the digit-addition sequence starting from 1."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
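# Cross-check sketch (not part of the original solution): brute-force the same
# digit-addition sequence for small n and compare it against the jump-based solution().
def _brute_force_term(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


# e.g. all(solution(n) == _brute_force_term(n) for n in (10, 100, 1000)) should hold.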
| 83 |
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
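# Hedged sketch of the same deprecate-by-subclassing pattern for an arbitrary pair of
# classes; `NewThing`/`OldThing` are illustrative names, not transformers classes.
class NewThing:
    pass


class OldThing(NewThing):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldThing is deprecated. Please use NewThing instead.", FutureWarning)
        super().__init__(*args, **kwargs)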
| 692 | 0 |
from __future__ import annotations

seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Returns True if n is prime, using the precomputed sieve."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Returns True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Returns the circular primes below the limit."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Returns the number of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
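# Worked example (illustration, not part of the original file): 197 is a circular prime
# because every digit rotation is prime and none of its digits is even.
def _rotations(n: int) -> list[int]:
    s = str(n)
    return [int(s[j:] + s[:j]) for j in range(len(s))]


assert _rotations(197) == [197, 971, 719]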
| 270 |
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""A basic Transformer block: self-attention, optional cross-attention, feed-forward."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    r"""A feed-forward layer."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    r"""GELU activation function, with tanh approximation support via `approximate="tanh"`."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function, https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    r"""The approximate form of the Gaussian Error Linear Unit, https://arxiv.org/abs/1606.08415."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero) conditioned on timestep and class labels."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
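# Minimal shape check (illustration, not part of the module; only runs inside the
# diffusers package because of the relative imports above): GEGLU projects to twice the
# output width, then gates one half with GELU of the other.
_geglu = GEGLU(8, 16)
assert _geglu(torch.randn(2, 4, 8)).shape == (2, 4, 16)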
| 692 | 0 |
"""simple docstring"""
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers have the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Returns the side length of the square spiral at which the ratio of primes along
    both diagonals first falls below `ratio`."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
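# Sanity sketch (illustration, not part of the original file): the four corner values of
# the spiral ring with odd side length j are j*j - m*(j - 1) for m in 0..3; the loop in
# solution() visits the three non-square corners of the next ring via
# range(j*j + j + 1, (j + 2)**2, j + 1).
_j = 5
assert [_j * _j - m * (_j - 1) for m in range(4)] == [25, 21, 17, 13]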
| 96 |
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # zero-weight edges go to the front of the deque, one-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
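# Usage sketch (illustration, not part of the original file): 0-1 BFS on a small
# 4-vertex graph; both 0->1->2 and 0->3->2 cost 1, so the shortest path is 1.
_g = AdjacencyList(4)
_g.add_edge(0, 1, 0)
_g.add_edge(1, 2, 1)
_g.add_edge(0, 3, 1)
_g.add_edge(3, 2, 0)
assert _g.get_shortest_path(0, 2) == 1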
| 692 | 0 |
"""simple docstring"""
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 470 |
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin

if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
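# Hedged note (illustration): the shared checks come from FlaxModelTesterMixin; a manual
# smoke test would instantiate FlaxAutoencoderKL from the init_dict above and call it on
# inputs_dict["sample"] with inputs_dict["prng_key"].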
| 692 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
A : str = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
A : int = get_tests_dir('''fixtures/vocab.json''')
A : Dict = get_tests_dir('''fixtures''')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCamelCase_ ( self ):
_lowercase = 0
def UpperCamelCase_ ( self ):
_lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__a , __a )
def UpperCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase = WavaVecaConfig()
_lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__a )
processor.save_pretrained(__a )
_lowercase = AutoProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__a , os.path.join(__a , __a ) )
copyfile(__a , os.path.join(__a , """vocab.json""" ) )
_lowercase = AutoProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase = WavaVecaFeatureExtractor()
_lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_lowercase = WavaVecaProcessor(__a , __a )
# save in new folder
processor.save_pretrained(__a )
# drop `processor_class` in tokenizer
with open(os.path.join(__a , __a ) , """r""" ) as f:
_lowercase = json.load(__a )
config_dict.pop("""processor_class""" )
with open(os.path.join(__a , __a ) , """w""" ) as f:
f.write(json.dumps(__a ) )
_lowercase = AutoProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase = WavaVecaFeatureExtractor()
_lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_lowercase = WavaVecaProcessor(__a , __a )
# save in new folder
processor.save_pretrained(__a )
# drop `processor_class` in feature extractor
with open(os.path.join(__a , __a ) , """r""" ) as f:
_lowercase = json.load(__a )
config_dict.pop("""processor_class""" )
with open(os.path.join(__a , __a ) , """w""" ) as f:
f.write(json.dumps(__a ) )
_lowercase = AutoProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__a )
# copy relevant files
copyfile(__a , os.path.join(__a , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__a , __a ) , """w""" ) as f:
f.write("""{}""" )
_lowercase = AutoProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase_ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__a )
_lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
_lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
_lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__a , use_fast=__a )
_lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCamelCase_ ( self ):
try:
AutoConfig.register("""custom""" , __a )
AutoFeatureExtractor.register(__a , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
AutoProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoProcessor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowercase = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase = os.path.join(__a , """vocab.txt""" )
with open(__a , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowercase = CustomTokenizer(__a )
_lowercase = CustomProcessor(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__a )
_lowercase = AutoProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self ):
        class NewFeatureExtractor(CustomFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(CustomTokenizer):
            special_attribute_present = False

        class NewProcessor(CustomProcessor):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , __a )
AutoFeatureExtractor.register(__a , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
AutoProcessor.register(__a , __a )
# If remote code is not set, the default is to use local classes.
_lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__a )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__a )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self ):
_lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def UpperCamelCase_ ( self ):
_lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCamelCase_ ( cls ):
_lowercase = TOKEN
HfFolder.save_token(__a )
@classmethod
def UpperCamelCase_ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
_lowercase = WavaVecaProcessor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__a , """test-processor""" ) , push_to_hub=__a , use_auth_token=self._token )
_lowercase = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__a , getattr(new_processor.feature_extractor , __a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase_ ( self ):
_lowercase = WavaVecaProcessor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__a , """test-processor-org""" ) , push_to_hub=__a , use_auth_token=self._token , organization="""valid_org""" , )
_lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__a , getattr(new_processor.feature_extractor , __a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase_ ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_lowercase = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase = os.path.join(__a , """vocab.txt""" )
with open(__a , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowercase = CustomTokenizer(__a )
_lowercase = CustomProcessor(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
_lowercase = Repository(__a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(__a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__a , """tokenizer_config.json""" ) ) as f:
_lowercase = json.load(__a )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__a , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__a , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__a , """custom_processing.py""" ) ) )
repo.push_to_hub()
_lowercase = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=__a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 287 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
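# Illustration (toy patterns, not the real lists above): the renaming is a sequence of
# literal substring replacements applied in order.
_toy_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
assert rename_state_dict_key("pegasus/decoder/layer_0/kernel", _toy_patterns) == "model.decoder.layers.0.weight"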
| 692 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span_len = min(end - start, max_word_len)
            for i in range(span_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
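# Worked illustration (not part of the original script): with the LTP word "身高",
# add_sub_symbol(["身", "高", "180"], {"身高"}) returns ["身", "##高", "180"], i.e. the
# second character is marked as a whole-word continuation for whole-word masking.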
| 123 |
def join(separator: str, separated: list[str]) -> str:
    """
    Joins a list of strings using a separator, mirroring str.join.

    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
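# Usage sketch (illustration): unlike str.join, the separator argument comes first here.
assert join("-", ["a", "b", "c"]) == "a-b-c"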
| 692 | 0 |
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 544 |
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
# (Korean; in English: "How to install Transformers" / "To install from source instead
# of the latest release, comment out the command above and uncomment the one below.")

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 692 | 0 |
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
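# Hedged usage sketch (paths are illustrative): a local custom pipeline like this one can
# be loaded with the diffusers `custom_pipeline` argument, e.g.
# DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="./path/to/pipeline_dir")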
| 371 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
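# Note (assumption, not from the original script): ".eZt8xd" is a Google-specific CSS
# class that can change at any time; a more robust approach would target result anchors
# structurally, e.g. soup.select("div#search a") and filter on the presence of an h3.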
| 692 | 0 |
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
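# Minimal runnable sketch (not part of the original callbacks) of
# count_trainable_parameters on a toy module.
_toy = torch.nn.Linear(4, 2)  # 4*2 weights + 2 biases = 10 trainable parameters
assert count_trainable_parameters(_toy) == 10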
| 304 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
    author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
    title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
    year = {2019},
    booktitle = {Proceedings of the 57th Annual Meeting of
        the Association for Computational Linguistics (Volume 1: Long Papers)},
    publisher = {Association for Computational Linguistics},
    address = {Florence, Italy},
}

@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 '95}
}

@INPROCEEDINGS{Bagga98algorithmsfor,
    author = {Amit Bagga and Breck Baldwin},
    title = {Algorithms for Scoring Coreference Chains},
    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
    year = {1998},
    pages = {563--566}
}

@INPROCEEDINGS{Luo05oncoreference,
    author = {Xiaoqiang Luo},
    title = {On coreference resolution performance metrics},
    booktitle = {In Proc. of HLT/EMNLP},
    year = {2005},
    pages = {25--32},
    publisher = {URL}
}

@inproceedings{moosavi-strube-2016-coreference,
    title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
    author = "Moosavi, Nafise Sadat and Strube, Michael",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P16-1060",
    doi = "10.18653/v1/P16-1060",
    pages = "632--642",
}
"""

_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].

This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column  Type    Description
1       Document ID     This is a variation on the document filename
2       Part number     Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3       Word number
4       Word itself     This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5       Part-of-Speech
6       Parse bit       This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7       Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8       Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.
9       Word sense      This is the word sense of the word in Column 3.
10      Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11      Named Entities  These columns identify the spans representing various named entities.
12:N    Predicate Arguments     There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N       Coreference     Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html

Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md

CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite were added by @andreasvc.
Parsing CoNLL files was developed by Leo Born.
"""

_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered as singletons. The default evaluation mode will include
        singletons in evaluations if they are included in the key or the system files.
        By setting 'keep_singletons=False', all singletons in the key and system files
        will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and
        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
        Minimum spans are determined using the MINA algorithm.

Returns:
    'mentions': mentions
    'muc': MUC metric [Vilain et al, 1995]
    'bcub': B-cubed [Bagga and Baldwin, 1998]
    'ceafe': CEAFe [Luo et al., 2005]
    'lea': LEA [Moosavi and Strube, 2016]
    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)

Examples:

    >>> coval = datasets.load_metric('coval')
    >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
    ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
    ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
    ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
    ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
    ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
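

# Note on the outputs (derived from `evaluate` above): `conll_score` is only
# added when all three of MUC, B-cubed and CEAFe were computed, and equals
# 100 * (f1_muc + f1_bcub + f1_ceafe) / 3.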
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum in the matrix stored in `filename`, moving
    only up, down and right from the left column to the right column
    (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
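
# Worked example (illustrative, not from the original file): for the matrix
#   [[1, 2],
#    [3, 4]]
# the left column seeds the sums [1, 3]; column 1 first gets [1+2, 3+4] = [3, 7],
# the up/down relaxation passes change nothing here, and the answer is
# min(3, 7) = 3.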
"""Project Euler problem 58: https://projecteuler.net/problem=58 (spiral primes)."""
import math


def is_prime(number: int) -> bool:
    """Check if a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral for which the ratio of
    primes along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
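    # Hedged sanity check (worked example, not from the original file): with
    # ratio 0.5 the prime ratio along the spiral diagonals first drops below
    # one half at side length 11, where 10 of the 21 diagonal numbers are prime.
    assert solution(0.5) == 11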
"""simple docstring"""
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = data
lowerCAmelCase__ :Tuple = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
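

# Usage note (illustrative, not part of the original module): the class above
# supports Python protocols directly, e.g. len(), indexing and iteration:
#   lst = LinkedList()
#   for value in (1, 2, 3):
#       lst.insert_tail(value)
#   lst.reverse()           # in-place, O(n) time, O(1) extra space
#   print(lst)              # "3->2->1"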
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """This section of the test uses varying data types for the input."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
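
# Usage sketch (illustrative, not part of the original __init__): with the
# lazy module above, `from transformers import TrOCRProcessor` resolves the
# attribute through _LazyModule, so `processing_trocr` is only imported the
# first time the name is actually accessed.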
"""Feature extractor class for YOLOS."""

import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the length of unique_prime_factors() for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first run of n consecutive integers that each have exactly
    n distinct prime factors."""
    base = 2

    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers
    with n distinct prime factors each (Project Euler problem 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
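    # Hedged worked example (Project Euler 47's own illustration): the first
    # run of three consecutive integers with three distinct prime factors each
    # starts at 644 (644 = 2^2*7*23, 645 = 3*5*43, 646 = 2*17*19).
    assert run(3) == [644, 645, 646]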
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin) primality check with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp), with d odd

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
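

# Worked example (illustrative, not from the original file): for the Carmichael
# number n = 561, n - 1 = 560 = 35 * 2**4, so d = 35 and exp = 4. A base such
# as a = 2 gives b = 2**35 % 561 = 263, and repeated squaring yields
# 263 -> 166 -> 67 -> 1 without ever hitting n - 1 = 560, so 561 is correctly
# reported composite even though it fools the plain Fermat test.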
if __name__ == "__main__":
_snake_case : int = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
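    # Hedged worked example (not in the original file): for [2, 1, 3] the
    # recursion compares keeping the pivot 2 ([2, 3]) against the best run
    # found after it ([1, 3]); on a length tie the latter wins.
    assert longest_subsequence([2, 1, 3]) == [1, 3]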
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a Gabor kernel: a Gaussian envelope modulated by a sinusoid."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
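

# Note on the formula above (explanatory, not from the original file): with the
# rotated coordinates x' = x*cos(theta) + y*sin(theta) and
# y' = -x*sin(theta) + y*cos(theta), each kernel entry is
#     exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi),
# i.e. a Gaussian envelope (scale sigma, aspect gamma) modulated by a
# sinusoidal carrier of wavelength lambd, phase psi, oriented at theta degrees.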
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors
    are 2, 3 and 5), using three merge pointers over the sequence built so far."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_snake_case : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player (maximizer when
    `is_max` is True) in a full binary game tree whose leaves hold `scores`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
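

# Worked example (illustrative, matching main() below): with scores
# [90, 23, 6, 33, 21, 65, 123, 34423] and height 3, the maximizing leaf layer
# reduces pairs to [90, 33, 65, 34423], the minimizing layer to [33, 65], and
# the root (a maximizer) picks 65.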
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
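
# Worked example (illustrative, not from the original file): for the input
# "1,-2,3", Kadane's recurrence gives best-ending-here sums [1, -1, 3] and
# running maxima [1, 1, 3], so solve_sub_array() returns 3.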
def base16_encode(data: bytes) -> str:
    """Encode `data` as an uppercase base16 (hex) string."""
    # Turn each byte into its two-digit uppercase hexadecimal representation,
    # then join everything together and return it.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
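    # Hedged round-trip check (not in the original file):
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"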
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def _A ( __snake_case :Optional[int] , __snake_case :Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
__SCREAMING_SNAKE_CASE = RegressionModel()
__SCREAMING_SNAKE_CASE = deepcopy(__snake_case )
__SCREAMING_SNAKE_CASE = RegressionDataset(length=80 )
__SCREAMING_SNAKE_CASE = DataLoader(__snake_case , batch_size=16 )
model.to(accelerator.device )
if sched:
__SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE = LambdaLR(__snake_case , lr_lambda=lambda __snake_case : epoch**0.6_5 )
__SCREAMING_SNAKE_CASE = LambdaLR(__snake_case , lr_lambda=lambda __snake_case : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(__snake_case , __snake_case , __snake_case , __snake_case )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(__snake_case , __snake_case )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _A ( __snake_case :str ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_training_setup(__snake_case )
# Use a single batch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = next(iter(__snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
else:
# Sync grads
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__SCREAMING_SNAKE_CASE = ddp_input[torch.randperm(len(__snake_case ) )]
def _A ( __snake_case :Tuple ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_training_setup(__snake_case )
# Use a single batch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = next(iter(__snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
else:
# Sync grads
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__SCREAMING_SNAKE_CASE = ddp_input[torch.randperm(len(__snake_case ) )]
def _A ( __snake_case :Tuple=False , __snake_case :Optional[int]=False ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Accelerator(
split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_training_setup(__snake_case )
for iteration, batch in enumerate(__snake_case ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = batch.values()
# Gather the distributed inputs and targs for the base model
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__snake_case ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__SCREAMING_SNAKE_CASE = ddp_input[torch.randperm(len(__snake_case ) )]
GradientState._reset_state()
def _A ( __snake_case :Union[str, Any]=False , __snake_case :Optional[int]=False ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Accelerator(
split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_training_setup(__snake_case , __snake_case )
for iteration, batch in enumerate(__snake_case ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = batch.values()
# Gather the distributed inputs and targs for the base model
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather((ddp_input, ddp_target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__snake_case )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
__SCREAMING_SNAKE_CASE = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__snake_case ))
if accelerator.num_processes > 1:
check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _A ( ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Accelerator()
__SCREAMING_SNAKE_CASE = RegressionDataset(length=80 )
__SCREAMING_SNAKE_CASE = DataLoader(__snake_case , batch_size=16 )
__SCREAMING_SNAKE_CASE = RegressionDataset(length=96 )
__SCREAMING_SNAKE_CASE = DataLoader(__snake_case , batch_size=16 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(__snake_case , __snake_case )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case )
if iteration < len(__snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case )
if batch_num < len(__snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _A ( ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Accelerator()
__SCREAMING_SNAKE_CASE = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__snake_case )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__snake_case )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__snake_case , __snake_case )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__snake_case , __snake_case )
def _A ( __snake_case :Any ) -> Any:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
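# Usage note (added; the file name is illustrative): the distributed branches
# above are only exercised when the script runs under the accelerate launcher,
#   accelerate launch test_sync.py
# while a plain `python test_sync.py` run covers the DistributedType.NO paths.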
| 693 |
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers that each have
    exactly n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers
    with n distinct prime factors each (Project Euler 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
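# Illustrative check (added; not part of the original script): the first pair
# of consecutive integers with two distinct prime factors each is (14, 15),
# since 14 = 2 * 7 and 15 = 3 * 5.
if __name__ == "__main__":
    assert run(2) == [14, 15]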
| 693 | 1 |
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wraps an lr_lambda in a picklable callable object so that LambdaLR
    schedulers can be saved with torch.save and reloaded."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
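# Illustrative note (added; not part of the original module): LambdaLR only
# serializes lr_lambdas that are callable objects, not bare functions or
# lambdas. Wrapping each lr_lambda in LambdaScheduleWrapper therefore makes
# scheduler.state_dict() fully picklable, which is exactly what
# unwrap_and_save_reload_schedule exercises with torch.save/torch.load.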
| 693 |
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
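# Illustrative note (added; not part of the original script): the qkv handling
# in convert_state_dict assumes the fused projection stacks query, key and
# value row-wise, so for hidden size `dim` the blocks are recovered as
#   query = val[:dim, :], key = val[dim : dim * 2, :], value = val[-dim:, :]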
| 693 | 1 |
| 693 |
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
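# Illustrative usage (added; not part of the original module): instantiating
# the deprecated class still works, it simply warns and then behaves exactly
# like CLIPImageProcessor, e.g.
#   feature_extractor = CLIPFeatureExtractor()  # emits a FutureWarning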
| 693 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
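# Illustrative usage (added; not part of the original module):
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]  # -> [15496, 995]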
| 693 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all the amicable numbers under n (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
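# Illustrative check (added; not part of the original script): 220 and 284 form
# the classic amicable pair, so sum_of_divisors should map each onto the other.
if __name__ == "__main__":
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220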
| 693 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
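# Illustrative usage (added; not part of the original module): a typical yes/no
# prompt built from these helpers.
#   use_cpu = _ask_field(
#       "Do you want to run your training on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )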
| 693 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
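# Illustrative check (added; not part of the original script): one mole of an
# ideal gas at 273 K in a 22.4 L vessel is at roughly 1 atm, since
# (1 * 0.0821 * 273) / 22.4 ~= 1.0, which rounds to 1.
if __name__ == "__main__":
    assert moles_to_pressure(volume=22.4, moles=1, temperature=273) == 1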
| 693 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self, **_a ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname, **_a ).image_processor
def __lowerCAmelCase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(_a, 0, -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_a, padding_value=1.0 )
__SCREAMING_SNAKE_CASE = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=_a, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, _a )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=_a )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="np" )
__SCREAMING_SNAKE_CASE = processor(images=_a, return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_torch
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=_a )
__SCREAMING_SNAKE_CASE = [torch.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = [[17_64, 26_46]]
__SCREAMING_SNAKE_CASE = [[6_83, 10_24]]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(_a, _a, _a )
self.assertEqual(masks[0].shape, (1, 3, 17_64, 26_46) )
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
_a, torch.tensor(_a ), torch.tensor(_a ) )
self.assertEqual(masks[0].shape, (1, 3, 17_64, 26_46) )
# should also work with np
__SCREAMING_SNAKE_CASE = [np.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(_a, np.array(_a ), np.array(_a ) )
self.assertEqual(masks[0].shape, (1, 3, 17_64, 26_46) )
__SCREAMING_SNAKE_CASE = [[1, 0], [0, 1]]
with self.assertRaises(_a ):
__SCREAMING_SNAKE_CASE = processor.post_process_masks(_a, np.array(_a ), np.array(_a ) )
@require_vision
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
    def get_image_processor( self, **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs ).image_processor
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_a, padding_value=1.0 )
__SCREAMING_SNAKE_CASE = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=_a, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, _a )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=_a )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="np" )
__SCREAMING_SNAKE_CASE = processor(images=_a, return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_tf
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=_a )
__SCREAMING_SNAKE_CASE = [tf.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = [[17_64, 26_46]]
__SCREAMING_SNAKE_CASE = [[6_83, 10_24]]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(_a, _a, _a, return_tensors="tf" )
self.assertEqual(masks[0].shape, (1, 3, 17_64, 26_46) )
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
_a, tf.convert_to_tensor(_a ), tf.convert_to_tensor(_a ), return_tensors="tf", )
self.assertEqual(masks[0].shape, (1, 3, 17_64, 26_46) )
# should also work with np
__SCREAMING_SNAKE_CASE = [np.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
_a, np.array(_a ), np.array(_a ), return_tensors="tf" )
self.assertEqual(masks[0].shape, (1, 3, 17_64, 26_46) )
__SCREAMING_SNAKE_CASE = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
_a, np.array(_a ), np.array(_a ), return_tensors="tf" )
@require_vision
@require_torchvision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
    def get_image_processor( self, **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs ).image_processor
def __lowerCAmelCase ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=_a )
        __SCREAMING_SNAKE_CASE = np.random.randint(0, 2, size=(1, 3, 5, 5) ).astype(np.float32 )
__SCREAMING_SNAKE_CASE = [tf.convert_to_tensor(_a )]
__SCREAMING_SNAKE_CASE = [torch.tensor(_a )]
__SCREAMING_SNAKE_CASE = [[17_64, 26_46]]
__SCREAMING_SNAKE_CASE = [[6_83, 10_24]]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
_a, _a, _a, return_tensors="tf" )
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
_a, _a, _a, return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=_a )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="pt" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE = processor(images=_a, return_tensors="pt" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="tf" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE = processor(images=_a, return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(_a, _a ) )
self.assertTrue(np.allclose(_a, _a ) )
self.assertTrue(np.allclose(_a, _a ) )
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = encoder_seq_length
__SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
__SCREAMING_SNAKE_CASE = self.decoder_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = decoder_layers
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig.from_pretrained("google/umt5-base" )
    def prepare_inputs_dict( self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ) -> int:
if attention_mask is None:
__SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = config.num_attention_heads
__SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
return config, input_dict
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config( self ):
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
__SCREAMING_SNAKE_CASE = result.last_hidden_state
__SCREAMING_SNAKE_CASE = result.past_key_values
__SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
__SCREAMING_SNAKE_CASE = model(_a )
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )
    def create_and_check_model_fpaa_forward( self, config, input_dict ):
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
__SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class UMTaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ =[0.8, 0.9]
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = config_and_inputs[0]
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
__SCREAMING_SNAKE_CASE = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
}
for attn_name, (name, mask) in zip(_a, head_masking.items() ):
__SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_heads, device=_a )
__SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, )
# We check the state of decoder_attentions and cross_attentions just from the last step
__SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
                [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
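
# The head-masking test above asserts that a zeroed mask yields all-zero
# attention weights. A self-contained sketch of the mechanism, assuming the
# usual (batch, heads, query, key) layout in which the mask multiplies the
# per-head attention probabilities:
def _head_mask_sketch():
    batch, heads, seq = 1, 4, 3
    attn_probs = torch.softmax(torch.randn(batch, heads, seq, seq ), dim=-1 )
    head_mask = torch.zeros(heads ).view(1, heads, 1, 1 )  # 0.0 disables a head
    masked = attn_probs * head_mask
    assert masked.sum().item() == 0.0  # mirrors the test's sum() == 0.0 check
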
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS : Optional[List[str]] = None
_NATIVE_BYTEORDER : str = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "PIL.Image.Image"
    pa_type : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type : str = field(default="Image" , init=False , repr=False )
    def __call__( self ):
        return self.pa_type
    def encode_example( self, value ) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(value, list ):
            value = np.array(value )
        if isinstance(value, str ):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value, PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_a ):
__SCREAMING_SNAKE_CASE = PIL.Image.open(_a )
else:
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id.get(_a )
except ValueError:
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE = BytesIO(f.read() )
__SCREAMING_SNAKE_CASE = PIL.Image.open(bytes_ )
else:
__SCREAMING_SNAKE_CASE = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage( self, storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ), type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ), type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type )
    def embed_storage( self, storage ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path, "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type )
def list_image_compression_formats() -> List[str]:
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image : "PIL.Image.Image" ) -> bytes:
    """simple docstring"""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image( image : "PIL.Image.Image" ) -> dict:
    """simple docstring"""
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array : np.ndarray ) -> dict:
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(
    objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
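
# A minimal round-trip sketch for the Image feature above: a PIL image is
# serialized to a {"bytes", "path"} dict via image_to_bytes() and reopened
# from the raw bytes, mirroring the decode path.
def _image_roundtrip_sketch():
    import PIL.Image
    original = PIL.Image.fromarray(np.zeros((4, 4, 3), dtype=np.uint8 ))
    encoded = {"path": None, "bytes": image_to_bytes(original )}
    decoded = PIL.Image.open(BytesIO(encoded["bytes"] ))
    decoded.load()  # same "Too many open files" precaution as the decoder
    assert decoded.size == original.size
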
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model : BertModel , ckpt_dir : str , model_name : str ):
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name :str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'''bert/{name}'''
    def create_tf_var(tensor :np.ndarray , name :str , session :tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main( raw_args=None ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
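
# The checkpoint conversion above is mostly string rewriting; a tiny,
# TensorFlow-free sketch of what to_tf_var_name() produces for a typical BERT
# weight name:
def _name_mapping_sketch():
    name = "encoder.layer.0.attention.self.query.weight"
    for patt, repl in (("layer.", "layer_"), (".", "/"), ("weight", "kernel")):
        name = name.replace(patt, repl )
    return f"bert/{name}"  # -> "bert/encoder/layer_0/attention/self/query/kernel"
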
def fibonacci( n :int ) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n :int ) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n :int = 1000 ) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n )
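# A closed-form cross-check from Binet's formula, assuming F(k) has
# floor(k*log10(phi) - log10(5)/2) + 1 digits: the first index with n digits
# can then be computed directly instead of building the sequence.
def solution_closed_form(n: int = 1000) -> int:
    import math
    phi = (1 + math.sqrt(5 )) / 2
    return math.ceil((n - 1 + math.log10(5 ) / 2) / math.log10(phi ))
# solution_closed_form(3) == 12, since F(12) = 144 is the first 3-digit term.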
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( SequenceFeatureExtractor ):
SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""]
    def __init__( self, feature_size = 1, sampling_rate = 2_40_00, padding_value = 0.0, chunk_length_s = None, overlap = None, **kwargs, ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self, raw_audio, padding = None, truncation = False, max_length = None, return_tensors = None, sampling_rate = None, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = bool(
isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
if is_batched:
            __SCREAMING_SNAKE_CASE = [np.asarray(audio, dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray ):
            __SCREAMING_SNAKE_CASE = np.asarray(raw_audio, dtype=np.float32 )
        elif isinstance(raw_audio, np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            __SCREAMING_SNAKE_CASE = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            __SCREAMING_SNAKE_CASE = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE = "max_length"
else:
__SCREAMING_SNAKE_CASE = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE = self.pad(
_a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, )
if padding:
__SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" )
__SCREAMING_SNAKE_CASE = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a )
return padded_inputs
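
# The chunking arithmetic implemented by the two properties above, in concrete
# numbers and assuming the 24 kHz default: chunk_length_s = 1.0 with
# overlap = 0.5 gives 24000-sample chunks that start 12000 samples apart.
def _chunking_sketch(sampling_rate=2_40_00, chunk_length_s=1.0, overlap=0.5):
    chunk_length = int(chunk_length_s * sampling_rate)          # 24000
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 12000
    return chunk_length, chunk_stride
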
# Imports
import numpy as np
class __SCREAMING_SNAKE_CASE :
    def __init__( self, red=None, green=None, blue=None, red_edge=None, nir=None ):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir )
    def set_matricies( self, red=None, green=None, blue=None, red_edge=None, nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self, index="", red=None, green=None, blue=None, red_edge=None, nir=None ):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir )
        funcs = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arvaa( self ):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi( self ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self, x=0.08, a=1.22, b=0.03 ):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        return self.nir - self.green
    def evi( self ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self, y=0.16 ):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self, n=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self, a=None, b=None ):
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        return self.nir / self.red
    def mrvi( self ):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        return self.nir / self.red
    def tvi( self ):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
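
# NDVI, the most widely used of the indices above, computed directly on small
# numpy arrays for reference: values fall in [-1, 1], and strong near-infrared
# reflectance (healthy vegetation) pushes the index toward 1.
def _ndvi_sketch():
    nir = np.array([0.8, 0.6, 0.3] )
    red = np.array([0.1, 0.2, 0.3] )
    return (nir - red) / (nir + red)  # ~[0.78, 0.50, 0.00]
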
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    unet : UNetaDModel
    scheduler : ScoreSdeVeScheduler
    def __init__( self, unet, scheduler ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
@torch.no_grad()
def __call__( self, _a = 1, _a = 20_00, _a = None, _a = "pil", _a = True, **_a, ) -> Union[ImagePipelineOutput, Tuple]:
__SCREAMING_SNAKE_CASE = self.unet.config.sample_size
__SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size)
__SCREAMING_SNAKE_CASE = self.unet
__SCREAMING_SNAKE_CASE = randn_tensor(_a, generator=_a ) * self.scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE = sample.to(self.device )
self.scheduler.set_timesteps(_a )
self.scheduler.set_sigmas(_a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__SCREAMING_SNAKE_CASE = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__SCREAMING_SNAKE_CASE = self.unet(_a, _a ).sample
__SCREAMING_SNAKE_CASE = self.scheduler.step_correct(_a, _a, generator=_a ).prev_sample
# prediction step
__SCREAMING_SNAKE_CASE = model(_a, _a ).sample
__SCREAMING_SNAKE_CASE = self.scheduler.step_pred(_a, _a, _a, generator=_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean
__SCREAMING_SNAKE_CASE = sample_mean.clamp(0, 1 )
__SCREAMING_SNAKE_CASE = sample.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(_a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_a )
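
# The __call__ loop above is the predictor-corrector sampler from score-based
# generative modeling: at each noise level it runs several Langevin correction
# updates, then one reverse-SDE prediction step. A stripped-down,
# framework-free sketch of that control flow; `score_fn` and the step sizes
# are illustrative placeholders, not the scheduler's actual update rules.
def _predictor_corrector_sketch(score_fn, sample, sigmas, correct_steps=1):
    for sigma in sigmas:                      # highest noise level first
        for _ in range(correct_steps):        # correction (Langevin) steps
            sample = sample + 0.01 * score_fn(sample, sigma)
        sample = sample + sigma * score_fn(sample, sigma)  # prediction step
    return sample
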
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee( BertEncoder ):
    def adaptive_forward( self, hidden_states, current_layer=None, attention_mask=None, head_mask=None ):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , __SCREAMING_SNAKE_CASE , )
class BertModelWithPabee( BertModel ):
def __init__( self, _a ) -> Any:
super().__init__(_a )
        self.encoder = BertEncoderWithPabee(_a )
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold( self, threshold ):
        self.regression_threshold = threshold
    def set_patience( self, patience ):
        self.patience = patience
    def reset_stats( self ):
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats( self ):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __lowerCAmelCase ( self, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, _a=False, ) -> Union[str, Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__SCREAMING_SNAKE_CASE = input_ids.size()
elif inputs_embeds is not None:
__SCREAMING_SNAKE_CASE = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__SCREAMING_SNAKE_CASE = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(_a, device=_a )
if token_type_ids is None:
__SCREAMING_SNAKE_CASE = torch.zeros(_a, dtype=torch.long, device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__SCREAMING_SNAKE_CASE = self.get_extended_attention_mask(_a, _a, _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = encoder_hidden_states.size()
__SCREAMING_SNAKE_CASE = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(_a, device=_a )
__SCREAMING_SNAKE_CASE = self.invert_attention_mask(_a )
else:
__SCREAMING_SNAKE_CASE = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__SCREAMING_SNAKE_CASE = self.get_head_mask(_a, self.config.num_hidden_layers )
__SCREAMING_SNAKE_CASE = self.embeddings(
input_ids=_a, position_ids=_a, token_type_ids=_a, inputs_embeds=_a )
__SCREAMING_SNAKE_CASE = embedding_output
if self.training:
__SCREAMING_SNAKE_CASE = []
for i in range(self.config.num_hidden_layers ):
__SCREAMING_SNAKE_CASE = self.encoder.adaptive_forward(
_a, current_layer=_a, attention_mask=_a, head_mask=_a )
__SCREAMING_SNAKE_CASE = self.pooler(_a )
__SCREAMING_SNAKE_CASE = output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
__SCREAMING_SNAKE_CASE = self.encoder(
_a, attention_mask=_a, head_mask=_a, encoder_hidden_states=_a, encoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = self.pooler(encoder_outputs[0] )
__SCREAMING_SNAKE_CASE = [output_layers[self.config.num_hidden_layers - 1](_a )]
else:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__SCREAMING_SNAKE_CASE = self.encoder.adaptive_forward(
_a, current_layer=_a, attention_mask=_a, head_mask=_a )
__SCREAMING_SNAKE_CASE = self.pooler(_a )
__SCREAMING_SNAKE_CASE = output_layers[i](_a )
if regression:
__SCREAMING_SNAKE_CASE = logits.detach()
if patient_result is not None:
__SCREAMING_SNAKE_CASE = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__SCREAMING_SNAKE_CASE = 0
else:
__SCREAMING_SNAKE_CASE = logits.detach().argmax(dim=1 )
if patient_result is not None:
__SCREAMING_SNAKE_CASE = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = logits
if patient_counter == self.patience:
break
__SCREAMING_SNAKE_CASE = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """ , BERT_START_DOCSTRING , )
class BertForSequenceClassificationWithPabee( BertPreTrainedModel ):
def __init__( self, _a ) -> Union[str, Any]:
super().__init__(_a )
__SCREAMING_SNAKE_CASE = config.num_labels
__SCREAMING_SNAKE_CASE = BertModelWithPabee(_a )
__SCREAMING_SNAKE_CASE = nn.Dropout(config.hidden_dropout_prob )
__SCREAMING_SNAKE_CASE = nn.ModuleList(
[nn.Linear(config.hidden_size, self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __lowerCAmelCase ( self, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
__SCREAMING_SNAKE_CASE = self.bert(
input_ids=_a, attention_mask=_a, token_type_ids=_a, position_ids=_a, head_mask=_a, inputs_embeds=_a, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
__SCREAMING_SNAKE_CASE = (logits[-1],)
if labels is not None:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
__SCREAMING_SNAKE_CASE = MSELoss()
__SCREAMING_SNAKE_CASE = loss_fct(logits_item.view(-1 ), labels.view(-1 ) )
else:
__SCREAMING_SNAKE_CASE = CrossEntropyLoss()
__SCREAMING_SNAKE_CASE = loss_fct(logits_item.view(-1, self.num_labels ), labels.view(-1 ) )
if total_loss is None:
__SCREAMING_SNAKE_CASE = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__SCREAMING_SNAKE_CASE = (total_loss / total_weights,) + outputs
return outputs
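
# The patience rule above, isolated from the model: inference stops once the
# argmax prediction has been identical for `patience` consecutive layers. A
# stand-alone sketch over a list of per-layer predictions:
def _pabee_early_exit_sketch(per_layer_predictions, patience=2):
    patient_counter, previous = 0, None
    for layer, prediction in enumerate(per_layer_predictions):
        patient_counter = patient_counter + 1 if prediction == previous else 0
        previous = prediction
        if patient_counter == patience:
            return layer + 1  # number of layers actually evaluated
    return len(per_layer_predictions)
# _pabee_early_exit_sketch([1, 0, 0, 0, 1], patience=2) returns 4, not 5.
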
def solution( n :int = 400_0000 ) -> int:
    """simple docstring"""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
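# Every third Fibonacci number is even, so the even terms obey their own
# recurrence E(k) = 4*E(k-1) + E(k-2) with E(1) = 2, E(2) = 8; summing those
# avoids generating the odd terms entirely:
def solution_even_only(n: int = 400_0000) -> int:
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total
# solution_even_only() == solution() for any limit.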
if __name__ == "__main__":
print(F"""{solution() = }""")
import math
from collections.abc import Callable
def intersection( function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    """simple docstring"""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x : float ) -> float:
    """simple docstring"""
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
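# For reference, the classic test equation f(x) = x^3 - 2x - 5 = 0 has its
# real root near 2.0945515, so the call above should print a value within the
# 1e-5 stopping tolerance of that root:
assert math.isclose(intersection(f, 3, 3.5 ), 2.0945515, rel_tol=1E-4 )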
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr : list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element : float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast( arr : list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_item : float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element( arr : list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack : list[float] = []
    result : list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
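# Expected behaviour on a small input, for reference: each element maps to the
# first strictly greater element to its right, or -1 if none exists.
assert next_greatest_element([2, 7, 3, 5, 4] ) == [7, -1, 5, -1, -1]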
def _A ( input_num :int ) -> int:
    """simple docstring"""
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
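# The proper-divisor sum gives a one-line perfect-number check: a number is
# perfect when it equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3
# and 28 = 1 + 2 + 4 + 7 + 14.
assert _A(6 ) == 6 and _A(28 ) == 28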
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> Any:
__SCREAMING_SNAKE_CASE = data
__SCREAMING_SNAKE_CASE = None
def __repr__( self ) -> str:
return f'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
def __init__( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = None
def __iter__( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(_a ) for item in self] )
def __getitem__( self, _a ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, _a, _a ) -> None:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
__SCREAMING_SNAKE_CASE = self.head
for _ in range(_a ):
__SCREAMING_SNAKE_CASE = current.next
__SCREAMING_SNAKE_CASE = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)
    def delete_head(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None
    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
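# A minimal usage sketch of the class above: build 1->2->3, reverse it in
# place, and read the result back through __repr__.
def reverse_demo() -> None:
    demo = LinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    assert str(demo) == "1->2->3"
    demo.reverse()
    assert str(demo) == "3->2->1"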
def test_singly_linked_list() -> None:
    """Exercise insertion, deletion, indexing and reversal."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList with heterogeneous data, including Node instances and None."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive driver for the LinkedList class."""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 693 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
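# Hedged usage sketch (the checkpoint ids are illustrative, not pinned by
# this module):
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )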
| 693 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Split raw DPR training data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
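# Example invocation (script and file names are illustrative):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set eval_questions.txt --gold_data_path gold_titles.tsv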
| 693 | 1 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum all perimeters produced by the recurrence below that do not exceed ``max_perimeter``."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
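# Hedged usage sketch (the checkpoint id is illustrative):
#   from transformers import ConvNextImageProcessor
#   processor = ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   inputs = processor(images=image, return_tensors="pt")  # inputs["pixel_values"]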
| 693 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False
@dataclass
class Audio:
    """Audio feature: a dictionary with the audio bytes and/or the path to the audio file."""
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Any]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
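# Hedged usage sketch (the column wiring is illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"audio": ["path/to/file.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}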
| 693 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 693 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **kwargs):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 693 | 1 |
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of the numbers in ``nums``."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
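    # Worked example: the mean of [4, 8, 10, 14] is 9, the deviations are
    # 5, 1, 1, 5, so the average absolute deviation is 12 / 4 = 3.0.
    assert average_absolute_deviation([4, 8, 10, 14]) == 3.0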
| 693 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic primality test with ``prec`` random witness rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
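# Spot checks (assuming bin_exp_mod(a, d, n) computes pow(a, d, n)):
# is_prime_big(97) should return True and is_prime_big(561) False, since 561
# is a Carmichael number that fools the plain Fermat test.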
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 693 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 693 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__SCREAMING_SNAKE_CASE = ksize + 1
__SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
__SCREAMING_SNAKE_CASE = x - ksize // 2
__SCREAMING_SNAKE_CASE = y - ksize // 2
# degree to radiant
__SCREAMING_SNAKE_CASE = theta / 180 * np.pi
__SCREAMING_SNAKE_CASE = np.cos(_theta )
__SCREAMING_SNAKE_CASE = np.sin(_theta )
# get kernel x
__SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py
# get kernel y
__SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py
# fill kernel
__SCREAMING_SNAKE_CASE = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
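# Hedged shape check (assumes the function keeps its upstream name
# `gabor_filter_kernel`, as the demo below calls it): an even ksize is
# bumped to the next odd size before the kernel is filled.
if __name__ == "__main__":
    assert gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape == (5, 5)
    assert gabor_filter_kernel(5, 8, 0, 10, 0, 0).shape == (5, 5)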
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
    out += filter2D(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
    _snake_case : Union[str, Any] = out.astype(np.uint8)
imshow('Original', gray)
    imshow('Gabor filter with 11x11 mask and 6 directions', out)
waitKey(0)
| 693 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_snake_case : Any = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
def __init__( self, *_a, **_a ) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
super().__init__(*_a, **_a )
| 693 |
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
__SCREAMING_SNAKE_CASE = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(__snake_case )
else:
__SCREAMING_SNAKE_CASE = sylvester(number - 1 )
__SCREAMING_SNAKE_CASE = num - 1
__SCREAMING_SNAKE_CASE = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
_snake_case : Tuple = {}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""llama"""
SCREAMING_SNAKE_CASE__ =["""past_key_values"""]
def __init__( self, _a=3_20_00, _a=40_96, _a=1_10_08, _a=32, _a=32, _a=None, _a="silu", _a=20_48, _a=0.02, _a=1E-6, _a=True, _a=0, _a=1, _a=2, _a=1, _a=False, _a=None, **_a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = num_key_value_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = rms_norm_eps
__SCREAMING_SNAKE_CASE = pretraining_tp
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_a, bos_token_id=_a, eos_token_id=_a, tie_word_embeddings=_a, **_a, )
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, _a ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''' )
__SCREAMING_SNAKE_CASE = self.rope_scaling.get("type", _a )
__SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor", _a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_a, _a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
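# Hedged usage sketch (assumes the upstream class name `LlamaConfig` for the
# configuration defined above):
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     # passes the rope-scaling validation above; a factor <= 1.0 or an
#     # unknown type such as {"type": "cubic", "factor": 2.0} raises ValueError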
| 693 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
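# Hedged minimal usage sketch of the pipeline exercised above (uses the
# public `transformers.pipeline` API imported at the top; the image path is
# illustrative):
#
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#     # -> [{"score": ..., "label": "remote"}, {"score": ..., "label": "cat"}, ...]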
| 693 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=2, _a=3, _a=4, _a=2, _a=7, _a=True, _a=True, _a=True, _a=True, _a=99, _a=36, _a=3, _a=4, _a=37, _a="gelu", _a=0.1, _a=0.1, _a=5_12, _a=16, _a=2, _a=0.02, _a=6, _a=6, _a=3, _a=4, _a=None, _a=10_00, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = text_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = coordinate_size
__SCREAMING_SNAKE_CASE = shape_size
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__SCREAMING_SNAKE_CASE = text_seq_length
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1
__SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__SCREAMING_SNAKE_CASE = bbox[i, j, 3]
__SCREAMING_SNAKE_CASE = bbox[i, j, 1]
__SCREAMING_SNAKE_CASE = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__SCREAMING_SNAKE_CASE = bbox[i, j, 2]
__SCREAMING_SNAKE_CASE = bbox[i, j, 0]
__SCREAMING_SNAKE_CASE = t
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size], self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
__SCREAMING_SNAKE_CASE = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a ) -> int:
__SCREAMING_SNAKE_CASE = LayoutLMvaModel(config=_a )
model.to(_a )
model.eval()
# text + image
__SCREAMING_SNAKE_CASE = model(_a, pixel_values=_a )
__SCREAMING_SNAKE_CASE = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a )
__SCREAMING_SNAKE_CASE = model(_a, bbox=_a, pixel_values=_a, token_type_ids=_a )
__SCREAMING_SNAKE_CASE = model(_a, bbox=_a, pixel_values=_a )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__SCREAMING_SNAKE_CASE = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__SCREAMING_SNAKE_CASE = model(pixel_values=_a )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = LayoutLMvaForSequenceClassification(_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a, labels=_a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = LayoutLMvaForTokenClassification(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a, labels=_a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a ) -> Dict:
__SCREAMING_SNAKE_CASE = LayoutLMvaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a, start_positions=_a, end_positions=_a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =(
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ =(
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a ) -> Dict:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = LayoutLMvaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_a, hidden_size=37 )
def __lowerCAmelCase ( self, _a, _a, _a=False ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = copy.deepcopy(_a )
if model_class in get_values(_a ):
__SCREAMING_SNAKE_CASE = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(_a, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_a ):
__SCREAMING_SNAKE_CASE = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=_a )
elif model_class in get_values(_a ):
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_a )
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_a )
elif model_class in [
*get_values(_a ),
]:
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_a )
elif model_class in [
*get_values(_a ),
]:
__SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=_a, )
return inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _A ( ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> int:
return LayoutLMvaImageProcessor(apply_ocr=_a ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(_a )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=_a, return_tensors="pt" ).pixel_values.to(_a )
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__SCREAMING_SNAKE_CASE = model(
input_ids=input_ids.to(_a ), bbox=bbox.to(_a ), pixel_values=pixel_values.to(_a ), )
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape, _a )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], _a, atol=1E-4 ) )
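# Shape arithmetic behind the expected (1, 199, 768) above (hedged note):
# layoutlmv3-base encodes a 224x224 image with 16x16 patches, giving
# (224 // 16) ** 2 + 1 = 197 visual tokens, plus the 2 text tokens in
# `input_ids`, for a total sequence length of 199.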
| 693 |
from __future__ import annotations
import math
def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
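if __name__ == "__main__":
    # Hedged worked check (assumes the upstream name `minimax`): on a
    # depth-2 tree with leaves [3, 5, 2, 9] the maximizer gets
    # max(min(3, 5), min(2, 9)) == 3.
    assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3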
| 693 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_snake_case : str = logging.get_logger(__name__)
_snake_case : List[Any] = {'vocab_file': 'vocab.txt'}
_snake_case : List[Any] = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
_snake_case : int = {
'YituTech/conv-bert-base': 5_12,
'YituTech/conv-bert-medium-small': 5_12,
'YituTech/conv-bert-small': 5_12,
}
_snake_case : str = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ =PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ =ConvBertTokenizer
def __init__( self, _a=None, _a=None, _a=True, _a="[UNK]", _a="[SEP]", _a="[PAD]", _a="[CLS]", _a="[MASK]", _a=True, _a=None, **_a, ) -> Dict:
super().__init__(
_a, tokenizer_file=_a, do_lower_case=_a, unk_token=_a, sep_token=_a, pad_token=_a, cls_token=_a, mask_token=_a, tokenize_chinese_chars=_a, strip_accents=_a, **_a, )
__SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase", _a ) != do_lower_case
or normalizer_state.get("strip_accents", _a ) != strip_accents
or normalizer_state.get("handle_chinese_chars", _a ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE = getattr(_a, normalizer_state.pop("type" ) )
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = strip_accents
__SCREAMING_SNAKE_CASE = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE = normalizer_class(**_a )
__SCREAMING_SNAKE_CASE = do_lower_case
def __lowerCAmelCase ( self, _a, _a=None ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self, _a, _a = None ) -> List[int]:
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self, _a, _a = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_a, name=_a )
return tuple(_a )
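# Hedged usage sketch (assumes the upstream class name
# `ConvBertTokenizerFast` for the tokenizer defined above):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     tokenizer("Hello world")["input_ids"]   # [CLS] ... [SEP] token ids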
| 693 |
def _A ( __snake_case :bytes ) -> str:
"""simple docstring"""
return "".join([hex(__snake_case )[2:].zfill(2 ).upper() for byte in list(__snake_case )] )
def _A ( __snake_case :str ) -> bytes:
"""simple docstring"""
if (len(__snake_case ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__snake_case ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
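if __name__ == "__main__":
    # Hedged round-trip check (assumes the upstream names `base16_encode`
    # and `base16_decode` for the two functions above):
    assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"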
| 693 | 1 |
import os
import string
import sys
_snake_case : Optional[int] = 1 << 8
_snake_case : List[Any] = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
_snake_case : Optional[int] = []
_snake_case : Optional[int] = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def _A ( ) -> int:
"""simple docstring"""
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__snake_case ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(__snake_case )
if ord(__snake_case ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["esc"] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__snake_case )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__snake_case )
try:
tty.setraw(__snake_case )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__snake_case , termios.TCSADRAIN , __snake_case )
return ch
def _A ( ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__snake_case ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__snake_case ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__snake_case ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__snake_case ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__snake_case ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__snake_case ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
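# Hedged interactive sketch (assumes the upstream names `get_character` and
# `KEYMAP`; needs a real terminal):
#
#     while True:
#         key = get_character()
#         if key == chr(KEYMAP["interrupt"]):
#             break
#         print(repr(key))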
| 693 |
from functools import lru_cache
def _A ( __snake_case :int ) -> set:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__snake_case )
if n > 1:
factors.add(__snake_case )
return factors
@lru_cache
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
return len(unique_prime_factors(__snake_case ) )
def _A ( __snake_case :list ) -> bool:
"""simple docstring"""
return len(set(__snake_case ) ) in (0, 1)
def _A ( __snake_case :int ) -> list:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 2
while True:
        # Build the next window of consecutive integers starting at base
__SCREAMING_SNAKE_CASE = [base + i for i in range(__snake_case )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
__SCREAMING_SNAKE_CASE = [upf_len(__snake_case ) for x in group]
checker.append(__snake_case )
# If all numbers in the list are equal, return the group variable.
if equality(__snake_case ):
return group
# Increment our base variable by 1
base += 1
def _A ( __snake_case :int = 4 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = run(__snake_case )
return results[0] if len(__snake_case ) else None
if __name__ == "__main__":
print(solution())
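if __name__ == "__main__":
    # Hedged worked check (assumes the upstream name `solution`, as the
    # print above uses): 14 = 2 * 7 and 15 = 3 * 5 are the first pair of
    # consecutive integers with two distinct prime factors each.
    assert solution(2) == 14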
| 693 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
_snake_case : Dict = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""align_text_model"""
def __init__( self, _a=3_05_22, _a=7_68, _a=12, _a=12, _a=30_72, _a="gelu", _a=0.1, _a=0.1, _a=5_12, _a=2, _a=0.02, _a=1E-1_2, _a=0, _a="absolute", _a=True, **_a, ) -> Any:
super().__init__(**_a )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = pad_token_id
@classmethod
def __lowerCAmelCase ( cls, _a, **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cls.get_config_dict(_a, **_a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
__SCREAMING_SNAKE_CASE = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a, **_a )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""align_vision_model"""
def __init__( self, _a = 3, _a = 6_00, _a = 2.0, _a = 3.1, _a = 8, _a = [3, 3, 5, 3, 5, 5, 3], _a = [32, 16, 24, 40, 80, 1_12, 1_92], _a = [16, 24, 40, 80, 1_12, 1_92, 3_20], _a = [], _a = [1, 2, 2, 2, 1, 2, 1], _a = [1, 2, 2, 3, 3, 4, 1], _a = [1, 6, 6, 6, 6, 6, 6], _a = 0.25, _a = "swish", _a = 25_60, _a = "mean", _a = 0.02, _a = 0.001, _a = 0.99, _a = 0.2, **_a, ) -> int:
super().__init__(**_a )
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = width_coefficient
__SCREAMING_SNAKE_CASE = depth_coefficient
__SCREAMING_SNAKE_CASE = depth_divisor
__SCREAMING_SNAKE_CASE = kernel_sizes
__SCREAMING_SNAKE_CASE = in_channels
__SCREAMING_SNAKE_CASE = out_channels
__SCREAMING_SNAKE_CASE = depthwise_padding
__SCREAMING_SNAKE_CASE = strides
__SCREAMING_SNAKE_CASE = num_block_repeats
__SCREAMING_SNAKE_CASE = expand_ratios
__SCREAMING_SNAKE_CASE = squeeze_expansion_ratio
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dim
__SCREAMING_SNAKE_CASE = pooling_type
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = batch_norm_eps
__SCREAMING_SNAKE_CASE = batch_norm_momentum
__SCREAMING_SNAKE_CASE = drop_connect_rate
__SCREAMING_SNAKE_CASE = sum(_a ) * 4
@classmethod
def __lowerCAmelCase ( cls, _a, **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cls.get_config_dict(_a, **_a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
__SCREAMING_SNAKE_CASE = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a, **_a )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""align"""
SCREAMING_SNAKE_CASE__ =True
def __init__( self, _a=None, _a=None, _a=6_40, _a=1.0, _a=0.02, **_a, ) -> List[str]:
super().__init__(**_a )
if text_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
__SCREAMING_SNAKE_CASE = AlignTextConfig(**_a )
__SCREAMING_SNAKE_CASE = AlignVisionConfig(**_a )
__SCREAMING_SNAKE_CASE = projection_dim
__SCREAMING_SNAKE_CASE = temperature_init_value
__SCREAMING_SNAKE_CASE = initializer_range
@classmethod
def __lowerCAmelCase ( cls, _a, _a, **_a ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **_a )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.text_config.to_dict()
__SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
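# Hedged usage sketch (assumes the upstream class names `AlignTextConfig`,
# `AlignVisionConfig` and `AlignConfig`, and the `from_text_vision_configs`
# classmethod defined above):
#
#     text_config = AlignTextConfig(vocab_size=30_522)
#     vision_config = AlignVisionConfig(image_size=600)
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#     config.to_dict()["text_config"]["vocab_size"]   # 30522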
| 693 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
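# Hedged invocation sketch (the script file name and paths are illustrative):
#
#     python convert_videomae_to_pytorch.py \
#         --model_name videomae-base \
#         --pytorch_dump_folder_path ./videomae-base \
#         --push_to_hub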
| 693 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
__SCREAMING_SNAKE_CASE = load_dataset("ashraq/esc50" )
__SCREAMING_SNAKE_CASE = dataset["train"]["audio"][-1]["array"]
        __SCREAMING_SNAKE_CASE = audio_classifier(_a, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
@unittest.skip("No models are available in TF" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
        # This is an audio recording of a dog
__SCREAMING_SNAKE_CASE = load_dataset("ashraq/esc50" )
__SCREAMING_SNAKE_CASE = dataset["train"]["audio"][-1]["array"]
        __SCREAMING_SNAKE_CASE = audio_classifier(_a, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
], )
        __SCREAMING_SNAKE_CASE = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
__SCREAMING_SNAKE_CASE = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
@unittest.skip("No models are available in TF" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
| 693 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
def __init__( self, *_a, **_a ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
super().__init__(*_a, **_a )
| 693 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _A ( __snake_case :Dict , __snake_case :Optional[int]=None ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
if token is not None:
__SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
__SCREAMING_SNAKE_CASE = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__SCREAMING_SNAKE_CASE = requests.get(__snake_case , headers=__snake_case ).json()
__SCREAMING_SNAKE_CASE = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
__SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(__snake_case ):
__SCREAMING_SNAKE_CASE = requests.get(url + f'''&page={i + 2}''' , headers=__snake_case ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _A ( __snake_case :Union[str, Any] , __snake_case :Dict=None ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
if token is not None:
__SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
    __SCREAMING_SNAKE_CASE = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
__SCREAMING_SNAKE_CASE = requests.get(__snake_case , headers=__snake_case ).json()
__SCREAMING_SNAKE_CASE = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
__SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(__snake_case ):
__SCREAMING_SNAKE_CASE = requests.get(url + f'''&page={i + 2}''' , headers=__snake_case ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _A ( __snake_case :str , __snake_case :int , __snake_case :Union[str, Any] , __snake_case :Any ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
if token is not None:
__SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
__SCREAMING_SNAKE_CASE = requests.get(__snake_case , headers=__snake_case , allow_redirects=__snake_case )
__SCREAMING_SNAKE_CASE = result.headers["Location"]
__SCREAMING_SNAKE_CASE = requests.get(__snake_case , allow_redirects=__snake_case )
__SCREAMING_SNAKE_CASE = os.path.join(__snake_case , f'''{artifact_name}.zip''' )
with open(__snake_case , "wb" ) as fp:
fp.write(response.content )
def _A ( __snake_case :List[Any] , __snake_case :Dict=None ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = None
with zipfile.ZipFile(__snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(__snake_case ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__snake_case ) as f:
for line in f:
__SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__SCREAMING_SNAKE_CASE = line[: line.index(": " )]
__SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
__SCREAMING_SNAKE_CASE = line[len("FAILED " ) :]
failed_tests.append(__snake_case )
elif filename == "job_name.txt":
__SCREAMING_SNAKE_CASE = line
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(__snake_case )} for `errors` '''
f'''and {len(__snake_case )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
__SCREAMING_SNAKE_CASE = None
if job_name and job_links:
__SCREAMING_SNAKE_CASE = job_links.get(__snake_case , __snake_case )
# A list with elements of the form (line of error, error, failed test)
__SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(__snake_case , __snake_case )]
return result
def _A ( __snake_case :Tuple , __snake_case :Union[str, Any]=None ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [os.path.join(__snake_case , __snake_case ) for p in os.listdir(__snake_case ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__snake_case , job_links=__snake_case ) )
return errors
def _A ( __snake_case :Optional[int] , __snake_case :Dict=None ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Counter()
counter.update([x[1] for x in logs] )
__SCREAMING_SNAKE_CASE = counter.most_common()
__SCREAMING_SNAKE_CASE = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    __SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _A ( __snake_case :int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = test.split("::" )[0]
if test.startswith("tests/models/" ):
__SCREAMING_SNAKE_CASE = test.split("/" )[2]
else:
__SCREAMING_SNAKE_CASE = None
return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
_snake_case : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case : Tuple = get_job_links(args.workflow_run_id, token=args.token)
_snake_case : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case : int = k.find(' / ')
_snake_case : str = k[index + len(' / ') :]
_snake_case : Optional[Any] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case : List[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case : Optional[Any] = reduce_by_error(errors)
_snake_case : List[Any] = reduce_by_model(errors)
_snake_case : str = make_github_table(reduced_by_error)
_snake_case : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 693 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of `n` (divisors of `n` excluding `n` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    """Return the sum of all amicable numbers below `n` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
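    # Sanity check on the classic amicable pair (illustrative): the proper
    # divisors of 220 sum to 284 and those of 284 sum back to 220, so both
    # numbers are counted by e.g. solution(285).
    assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220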
| 693 | 1 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
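    # Worked example (illustrative; assumes atm / litre / kelvin units):
    # 3 moles at 300 K in a 0.82 L vessel -> round(3 * 0.0821 * 300 / 0.82) = 90 atm.
    print(moles_to_pressure(0.82, 3, 300))  # 90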
| 693 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
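# Example invocation (illustrative; the model name and output path are placeholders):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --output_dir /tmp/debug_xnli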
| 693 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True,
        use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1,
        pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
        head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 693 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a `ksize` x `ksize` Gabor filter kernel."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
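# For reference, the kernel above implements the real part of the Gabor function
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel offsets from the kernel center rotated by `theta` degrees.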
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a pytorch `BertModel` into a TensorFlow checkpoint saved under `ckpt_dir`."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
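# Example of the renaming performed by `to_tf_var_name` above (illustrative):
#   encoder.layer.0.attention.self.query.weight
#   -> bert/encoder/layer_0/attention/self/query/kernel
# (this tensor is also transposed, since it matches `tensors_to_transpose`).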
| 693 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
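# Example invocation (illustrative; the argument names follow `InitializationArguments`
# as used above, and the values are placeholders):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-init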
| 693 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs an EnCodec feature extractor.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self, feature_size: int = 1, sampling_rate: int = 24_000, padding_value: float = 0.0,
        chunk_length_s: float = None, overlap: float = None, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
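# Chunking arithmetic (illustrative): with sampling_rate=24_000, chunk_length_s=1.0
# and overlap=0.01, `chunk_length` is 24_000 samples and `chunk_stride` is
# max(1, int(0.99 * 24_000)) = 23_760, so consecutive chunks overlap by 240 samples.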
| 693 | 1 |
def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n - 1]."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
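    # Worked example (illustrative): 10 = 1*3! + 2*2! + 0*1! in the factorial
    # number system, so the 10th permutation of [0, 1, 2, 3] is [1, 3, 0, 2].
    assert kth_permutation(10, 4) == [1, 3, 0, 2]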
| 693 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation with score-based generative models (SDE-VE).
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
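# Minimal usage sketch (illustrative; assumes a compatible SDE-VE checkpoint such
# as "google/ncsnpp-church" is available on the Hub):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church")
#   image = pipe(num_inference_steps=2000).images[0]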
| 693 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VivitModel`].
    """

    model_type = "vivit"

    def __init__(
        self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06,
        qkv_bias=True, **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 693 |
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed `n` (Project Euler problem 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 1 |
from math import factorial

"""
Dual-number based automatic differentiation.
"""


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)

        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Differentiate `func` at `position` to the given derivative `order` using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
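    # Sanity check (illustrative): f(y) = y**6, so f''(y) = 30 * y**4 and the
    # call above should print 30 * 9**4 = 196830.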
| 693 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]  # expected output for `arr`
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Return the next greatest element for each item of `arr` (brute force, index based)."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like `next_greatest_element_slow`, but iterates over slices instead of indices."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based implementation: each element is pushed and popped at most once."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
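# Complexity note (illustrative): the two nested-loop variants above are O(n^2)
# in the worst case, while the stack-based version pushes and pops each element
# at most once, giving O(n) overall.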
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 693 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VisualBertModel`].
    """

    model_type = "visual_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 693 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    # Used to change the data of a particular node
    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(len(self ), _a )
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(0, _a )
def __lowerCAmelCase ( self, _a, _a ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
__SCREAMING_SNAKE_CASE = Node(_a )
if self.head is None:
__SCREAMING_SNAKE_CASE = new_node
elif index == 0:
__SCREAMING_SNAKE_CASE = self.head # link new_node to head
__SCREAMING_SNAKE_CASE = new_node
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = new_node
def __lowerCAmelCase ( self ) -> None: # print every node data
print(self )
def __lowerCAmelCase ( self ) -> Any:
return self.delete_nth(0 )
def __lowerCAmelCase ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self, _a = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
__SCREAMING_SNAKE_CASE = self.head # default first node
if index == 0:
__SCREAMING_SNAKE_CASE = self.head.next
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self ) -> bool:
return self.head is None
def __lowerCAmelCase ( self ) -> None:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.head
while current:
# Store the current node's next node.
__SCREAMING_SNAKE_CASE = current.next
# Make the current node's next point backwards
__SCREAMING_SNAKE_CASE = prev
# Make the previous node be the current node
__SCREAMING_SNAKE_CASE = current
# Make the current node the next node (to progress iteration)
__SCREAMING_SNAKE_CASE = next_node
# Return prev in order to put the head at the end
__SCREAMING_SNAKE_CASE = prev
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__SCREAMING_SNAKE_CASE = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__SCREAMING_SNAKE_CASE = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__SCREAMING_SNAKE_CASE = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__SCREAMING_SNAKE_CASE = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
from doctest import testmod
testmod()
__SCREAMING_SNAKE_CASE = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
__SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip()
print("New list:" )
print(__snake_case )
print(f'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
| 693 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( CLIPImageProcessor ):
def __init__( self, *_a, **_a ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead.", _a, )
super().__init__(*_a, **_a )
| 693 |
import argparse
import json
from tqdm import tqdm
def _A ( ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=__snake_case , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=__snake_case , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=__snake_case , help="where to store parsed gold_data_path file" , )
__SCREAMING_SNAKE_CASE = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
__SCREAMING_SNAKE_CASE = json.load(__snake_case )
for dpr_record in tqdm(__snake_case ):
__SCREAMING_SNAKE_CASE = dpr_record["question"]
__SCREAMING_SNAKE_CASE = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(__snake_case ) + "\n" )
if __name__ == "__main__":
main()
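# Worked I/O example (added; the record below is illustrative only): for a
# DPR entry such as
#     {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}]}
# the loop writes "who wrote hamlet" as one line of --evaluation_set and the
# tab-joined positive-context titles ("Hamlet") as the matching line of
# --gold_data_path.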
| 693 | 1 |
# Pigeonhole sort algorithm
def _A ( __snake_case :List[str] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = min(__snake_case ) # min() finds the minimum value
__SCREAMING_SNAKE_CASE = max(__snake_case ) # max() finds the maximum value
__SCREAMING_SNAKE_CASE = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__SCREAMING_SNAKE_CASE = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__snake_case , __snake_case ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__SCREAMING_SNAKE_CASE = 0
for count in range(__snake_case ):
while holes[count] > 0:
holes[count] -= 1
__SCREAMING_SNAKE_CASE = count + min_val
i += 1
def _A ( ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__snake_case )
print("Sorted order is:" , " ".join(__snake_case ) )
if __name__ == "__main__":
main()
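# Minimal self-contained sketch (added): the same pigeonhole idea as above,
# restated with hypothetical names so it runs independently of the mangled
# helpers -- count each value into its hole, then write the holes back in
# order, for O(n + value_range) time.
def pigeonhole_sorted(xs: list[int]) -> list[int]:
    lo, hi = min(xs), max(xs)
    holes = [0] * (hi - lo + 1)  # one hole per possible value
    for x in xs:
        holes[x - lo] += 1  # counting pass
    out: list[int] = []
    for offset, count in enumerate(holes):
        out.extend([lo + offset] * count)  # ordered write-back pass
    return out

assert pigeonhole_sorted([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]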
| 693 |
def _A ( __snake_case :int = 10**9 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __SCREAMING_SNAKE_CASE ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self ) -> int:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self, _a, _a=0 ) -> Dict:
if str(_a ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_a )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=_a ).manual_seed(_a )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32), rng=random.Random(_a ) ).to(_a )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 16, 16), rng=random.Random(_a ) ).to(_a )
__SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA" )
def __lowerCAmelCase ( self ) -> List[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2, )
| 693 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =None
# Automatically constructed
SCREAMING_SNAKE_CASE__ ="dict"
SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Optional[int]:
return self.pa_type
def __lowerCAmelCase ( self, _a ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_a, _a ):
return {"bytes": None, "path": value}
elif isinstance(_a, _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__SCREAMING_SNAKE_CASE = BytesIO()
sf.write(_a, value["array"], value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already have the PCM bytes, we don't need to read the file again (just use them!)
__SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__SCREAMING_SNAKE_CASE = token_per_repo_id or {}
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
__SCREAMING_SNAKE_CASE = array.T
if self.mono:
__SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
__SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("bytes" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("path" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
return array_cast(_a, self.pa_type )
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a, "rb" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
return bytes_
__SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
__SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(_a, self.pa_type )
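# Summary note (added): encode_example above normalizes a path string, raw
# bytes, or an {"array", "sampling_rate"} dict into the {"bytes", "path"}
# storage form (arrays are serialized to an in-memory WAV via soundfile);
# decode_example reverses it with soundfile/librosa, optionally down-mixing
# to mono and resampling to the feature's configured sampling_rate.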
| 693 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_snake_case : List[str] = {'UserAgent': UserAgent().random}
def _A ( __snake_case :List[Any] ) -> dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = script.contents[0]
__SCREAMING_SNAKE_CASE = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = f'''https://www.instagram.com/{username}/'''
__SCREAMING_SNAKE_CASE = self.get_json()
def __lowerCAmelCase ( self ) -> dict:
__SCREAMING_SNAKE_CASE = requests.get(self.url, headers=_a ).text
__SCREAMING_SNAKE_CASE = BeautifulSoup(_a, "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ) -> str:
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __lowerCAmelCase ( self ) -> str:
return self.user_data["username"]
@property
def __lowerCAmelCase ( self ) -> str:
return self.user_data["full_name"]
@property
def __lowerCAmelCase ( self ) -> str:
return self.user_data["biography"]
@property
def __lowerCAmelCase ( self ) -> str:
return self.user_data["business_email"]
@property
def __lowerCAmelCase ( self ) -> str:
return self.user_data["external_url"]
@property
def __lowerCAmelCase ( self ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCAmelCase ( self ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __lowerCAmelCase ( self ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCAmelCase ( self ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCAmelCase ( self ) -> bool:
return self.user_data["is_verified"]
@property
def __lowerCAmelCase ( self ) -> bool:
return self.user_data["is_private"]
def _A ( __snake_case :str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
__SCREAMING_SNAKE_CASE = InstagramUser(__snake_case )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __snake_case )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : List[Any] = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 693 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,)
SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),)
def __lowerCAmelCase ( self, **_a ) -> str:
__SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00}
config.update(**_a )
return config
def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self, _a=0, **_a ) -> int:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self, **_a ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_a, "set_timesteps" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ):
__SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __lowerCAmelCase ( self ) -> str:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.full_loop()
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 693 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case : str = 16
_snake_case : Any = 32
def _A ( __snake_case :Accelerator , __snake_case :int = 16 ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" )
__SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" )
def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
    return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case :Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
__SCREAMING_SNAKE_CASE = 8
else:
__SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
__snake_case , padding="longest" , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors="pt" , )
# Instantiate dataloaders.
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case : Optional[Any] = mocked_dataloaders # noqa: F811
def _A ( __snake_case :Tuple , __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __snake_case ) == "1":
__SCREAMING_SNAKE_CASE = 2
# New Code #
__SCREAMING_SNAKE_CASE = int(args.gradient_accumulation_steps )
__SCREAMING_SNAKE_CASE = int(args.local_sgd_steps )
# Initialize accelerator
__SCREAMING_SNAKE_CASE = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__snake_case )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__SCREAMING_SNAKE_CASE = config["lr"]
__SCREAMING_SNAKE_CASE = int(config["num_epochs"] )
__SCREAMING_SNAKE_CASE = int(config["seed"] )
__SCREAMING_SNAKE_CASE = int(config["batch_size"] )
__SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" )
set_seed(__snake_case )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(__snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
__SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
__SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
with LocalSGD(
accelerator=__snake_case , model=__snake_case , local_sgd_steps=__snake_case , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__snake_case ):
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = output.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
__SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __snake_case )
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__snake_case , default=__snake_case , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=__snake_case , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=__snake_case , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
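# Cadence note (added, hedged): with gradient_accumulation_steps = G and
# local_sgd_steps = K, each worker takes an optimizer step every G
# minibatches, while LocalSGD averages parameters across workers every K
# minibatches -- independent schedules, which is why the header calls the
# two techniques "different, but complementary".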
| 693 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __snake_case :List[Any] , __snake_case :Union[str, Any]=1000 ) -> int:
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__SCREAMING_SNAKE_CASE = n - 1
__SCREAMING_SNAKE_CASE = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
__SCREAMING_SNAKE_CASE = 0
while count < prec:
__SCREAMING_SNAKE_CASE = random.randint(2 , n - 1 )
__SCREAMING_SNAKE_CASE = bin_exp_mod(__snake_case , __snake_case , __snake_case )
if b != 1:
__SCREAMING_SNAKE_CASE = True
for _ in range(__snake_case ):
if b == n - 1:
__SCREAMING_SNAKE_CASE = False
break
__SCREAMING_SNAKE_CASE = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case : int = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
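# Self-contained companion sketch (added): the same strong-probable-prime
# test with a fixed witness set instead of random ones; with bases
# (2, 3, 5, 7) it is provably deterministic for n < 3_215_031_751. Names
# here are hypothetical.
def miller_rabin_fixed(n: int) -> bool:
    if n < 2:
        return False
    for p in (2, 3, 5, 7):
        if n % p == 0:
            return n == p
    d, r = n - 1, 0
    while d % 2 == 0:
        d, r = d // 2, r + 1
    for a in (2, 3, 5, 7):  # fixed witnesses
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(r - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False
    return True

assert miller_rabin_fixed(97) and not miller_rabin_fixed(100)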
| 693 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( __snake_case :int ) -> Optional[int]:
"""simple docstring"""
def is_in_circle(__snake_case :float , __snake_case :float ) -> bool:
__SCREAMING_SNAKE_CASE = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__SCREAMING_SNAKE_CASE = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__snake_case ) )
# The ratio of the area for circle to square is pi/4.
__SCREAMING_SNAKE_CASE = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def _A ( __snake_case :int , __snake_case :Callable[[float], float] , __snake_case :float = 0.0 , __snake_case :float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(__snake_case , __snake_case ) ) for _ in range(__snake_case ) ) * (max_value - min_value)
def _A ( __snake_case :int , __snake_case :float = 0.0 , __snake_case :float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(__snake_case :float ) -> float:
return x
__SCREAMING_SNAKE_CASE = area_under_curve_estimator(
__snake_case , __snake_case , __snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("******************" )
def _A ( __snake_case :int ) -> None:
"""simple docstring"""
def function_to_integrate(__snake_case :float ) -> float:
return sqrt(4.0 - x * x )
__SCREAMING_SNAKE_CASE = area_under_curve_estimator(
__snake_case , __snake_case , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
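# Self-contained convergence check (added): Monte Carlo error shrinks like
# 1/sqrt(n), so 100_000 samples of f(x) = x**2 on [0, 1] land very close to
# the exact integral 1/3. Names here are hypothetical.
from random import uniform as _uniform
from statistics import mean as _mean

def _mc_integral(f, lo: float, hi: float, n: int = 100_000) -> float:
    return _mean(f(_uniform(lo, hi)) for _ in range(n)) * (hi - lo)

assert abs(_mc_integral(lambda x: x * x, 0.0, 1.0) - 1 / 3) < 0.01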
| 693 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__SCREAMING_SNAKE_CASE = ksize + 1
__SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
__SCREAMING_SNAKE_CASE = x - ksize // 2
__SCREAMING_SNAKE_CASE = y - ksize // 2
# degree to radiant
__SCREAMING_SNAKE_CASE = theta / 180 * np.pi
__SCREAMING_SNAKE_CASE = np.cos(_theta )
__SCREAMING_SNAKE_CASE = np.sin(_theta )
# get kernel x
__SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py
# get kernel y
__SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py
# fill kernel
__SCREAMING_SNAKE_CASE = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
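# Parameter guide (added comment): ksize is the square kernel size (meant to
# be odd -- the even case is bumped by one), sigma the width of the Gaussian
# envelope, theta the stripe orientation in degrees (converted to radians
# above), lambd the wavelength of the sinusoidal carrier, gamma the spatial
# aspect ratio, and psi the phase offset.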
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 | 1 |
from math import isqrt
def _A ( __snake_case :int ) -> list[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __snake_case , __snake_case ):
__SCREAMING_SNAKE_CASE = False
return [i for i in range(2 , __snake_case ) if is_prime[i]]
def _A ( __snake_case :int = 10**8 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = calculate_prime_numbers(max_number // 2 )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__snake_case ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 |
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
__SCREAMING_SNAKE_CASE = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(__snake_case )
else:
__SCREAMING_SNAKE_CASE = sylvester(number - 1 )
__SCREAMING_SNAKE_CASE = num - 1
__SCREAMING_SNAKE_CASE = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _A ( __snake_case :Namespace ) -> Any:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_snake_case : List[Any] = '\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __SCREAMING_SNAKE_CASE ( BaseTransformersCLICommand ):
@staticmethod
def __lowerCAmelCase ( _a ) -> Tuple:
__SCREAMING_SNAKE_CASE = parser.add_parser(
"convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", )
train_parser.add_argument("--model_type", type=_a, required=_a, help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint", type=_a, required=_a, help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output", type=_a, required=_a, help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config", type=_a, default="", help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name", type=_a, default=_a, help="Optional fine-tuning task name if the TF model was a finetuned model.", )
train_parser.set_defaults(func=_a )
def __init__( self, _a, _a, _a, _a, _a, *_a, ) -> str:
__SCREAMING_SNAKE_CASE = logging.get_logger("transformers-cli/converting" )
self._logger.info(f'''Loading model {model_type}''' )
__SCREAMING_SNAKE_CASE = model_type
__SCREAMING_SNAKE_CASE = tf_checkpoint
__SCREAMING_SNAKE_CASE = pytorch_dump_output
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = finetuning_task_name
def __lowerCAmelCase ( self ) -> List[str]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
if "ckpt" in self._tf_checkpoint.lower():
__SCREAMING_SNAKE_CASE = self._tf_checkpoint
__SCREAMING_SNAKE_CASE = ""
else:
__SCREAMING_SNAKE_CASE = self._tf_checkpoint
__SCREAMING_SNAKE_CASE = ""
convert_transfo_xl_checkpoint_to_pytorch(
_a, self._config, self._pytorch_dump_output, _a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 693 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 693 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 693 |
from __future__ import annotations
import math
def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
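# Worked example (added) of the recursion above on a height-2 tree with
# leaves [3, 5, 2, 9]: the root maximizer picks the better of its two
# minimizer children, so the optimal value is
# max(min(3, 5), min(2, 9)) == max(3, 2) == 3.
assert max(min(3, 5), min(2, 9)) == 3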
| 693 | 1 |
def _A ( __snake_case :int , __snake_case :str ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _A ( __snake_case :Optional[Any] , __snake_case :Tuple , __snake_case :List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0
while b > 0:
if b & 1:
__SCREAMING_SNAKE_CASE = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
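# Minimal self-contained restatement (added) of the idea both helpers share
# (Russian-peasant / binary multiplication): add `a` whenever the low bit of
# `b` is set, then double `a` and halve `b`. The name is hypothetical.
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res

assert binary_multiply(19, 13) == 247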
| 693 |
def _A ( __snake_case :bytes ) -> str:
"""simple docstring"""
return "".join([hex(__snake_case )[2:].zfill(2 ).upper() for byte in list(__snake_case )] )
def _A ( __snake_case :str ) -> bytes:
"""simple docstring"""
if (len(__snake_case ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__snake_case ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
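# Independent round-trip check (added) via the stdlib's uppercase RFC 3548
# base16 codec, which matches the behaviour of the two helpers above:
import base64
assert base64.b16encode(b"Hi!") == b"486921"
assert base64.b16decode("486921") == b"Hi!"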
| 693 | 1 |
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
while len(__snake_case ) < 1e6:
constant.append(str(__snake_case ) )
i += 1
__SCREAMING_SNAKE_CASE = "".join(__snake_case )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
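# Independent check (added) of the indexing above (Project Euler 40): the
# constant's digit string starts "123456789101112...", so index 0 is "1",
# index 9 is the "1" of 10, and index 99 is the "5" of 55; the full product
# of the 10**k-th digits (k = 0..6) is 210.
_digits = "".join(str(i) for i in range(1, 200))
assert _digits[0] == "1" and _digits[9] == "1" and _digits[99] == "5"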
if __name__ == "__main__":
print(solution())
| 693 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoised count of distinct prime factors."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Return True when every element of the iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Project Euler 47: the first of four consecutive integers with four distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
    print(solution())
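Checks taken straight from the problem statement: 14 = 2·7 and 15 = 3·5 are the first such pair, and 644, 645, 646 the first such triple.

assert run(2) == [14, 15]
assert run(3) == [644, 645, 646]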
| 693 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = encoder_seq_length
__SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
__SCREAMING_SNAKE_CASE = self.decoder_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = decoder_layers
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig.from_pretrained("google/umt5-base" )
def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
if attention_mask is None:
__SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = config.num_attention_heads
__SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
return config, input_dict
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
__SCREAMING_SNAKE_CASE = result.last_hidden_state
__SCREAMING_SNAKE_CASE = result.past_key_values
__SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
__SCREAMING_SNAKE_CASE = model(_a )
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create a hypothetical next token and extend it to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
        # append the new token to input_ids
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )
def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
__SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ =[0.8, 0.9]
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = config_and_inputs[0]
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
__SCREAMING_SNAKE_CASE = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
}
for attn_name, (name, mask) in zip(_a, head_masking.items() ):
__SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_heads, device=_a )
__SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, )
# We check the state of decoder_attentions and cross_attentions just from the last step
__SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
| 693 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
    __SCREAMING_SNAKE_CASE = {int(k): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
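A hypothetical invocation of the converter, kept as a sketch (the script filename and output path below are placeholders, not taken from the snippet):

# python convert_videomae_checkpoint.py \
#     --model_name videomae-base \
#     --pytorch_dump_folder_path ./videomae-base \
#     --push_to_hub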
| 693 | 1 |
def sylvester(number: int) -> int:
    """Return the nth term of Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
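Each term satisfies a(n+1) = a(n)² − a(n) + 1, which is exactly the `lower * upper + 1` step above; a quick check of the first four terms:

assert [sylvester(i) for i in range(1, 5)] == [2, 3, 7, 43]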
| 693 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 693 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=13, _a=30, _a=2, _a=3, _a=True, _a=True, _a=32, _a=2, _a=4, _a=37, _a="gelu", _a=0.1, _a=0.1, _a=10, _a=0.02, _a=3, _a=None, _a=2, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size], self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> List[str]:
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=_a, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def __lowerCAmelCase ( self, _a, _a, _a ) -> Tuple:
__SCREAMING_SNAKE_CASE = TFDeiTModel(config=_a )
__SCREAMING_SNAKE_CASE = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self, _a, _a, _a ) -> Dict:
__SCREAMING_SNAKE_CASE = TFDeiTForMaskedImageModeling(config=_a )
__SCREAMING_SNAKE_CASE = model(_a )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFDeiTForMaskedImageModeling(_a )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(_a )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self, _a, _a, _a ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = TFDeiTForImageClassification(_a )
__SCREAMING_SNAKE_CASE = model(_a, labels=_a )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFDeiTForImageClassification(_a )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(_a, labels=_a )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ =(
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = TFDeiTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_a, has_text_modality=_a, hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_a )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a, tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1], _a )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowerCAmelCase ( self, _a, _a, _a=False ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = super()._prepare_for_class(_a, _a, return_labels=_a )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFDeiTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _A ( ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> str:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=_a, return_tensors="tf" )
# forward pass
__SCREAMING_SNAKE_CASE = model(**_a )
# verify the logits
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape, _a )
__SCREAMING_SNAKE_CASE = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], _a, atol=1E-4 ) )
| 693 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 10000) -> int:
    """Project Euler 21: sum of all amicable numbers below the limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
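The classic amicable pair illustrates the predicate above: each number's proper divisors sum to the other, and neither is perfect.

assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220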
| 693 | 1 |
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count n below the limit with exactly ten solutions of x**2 - y**2 - z**2 = n."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
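A worked instance from the problem statement, as a check on what the sieve is counting: n = 27 is the least value with exactly two representations.

# 27 == 34**2 - 27**2 - 20**2 == 12**2 - 9**2 - 6**2
assert 34**2 - 27**2 - 20**2 == 12**2 - 9**2 - 6**2 == 27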
| 693 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality for a solute with the given n-factor."""
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Pressure in atm from PV = nRT with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Volume in litres from PV = nRT."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Temperature in kelvin from PV = nRT."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
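A worked ideal-gas example (values chosen here, not from the snippet): 3 mol at 300 K in 0.82 L gives 3 · 0.0821 · 300 / 0.82 ≈ 90.1, which rounds to 90 atm.

assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90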
| 693 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
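With the lazy module in place, importing a torch-backed symbol defers the heavy import until first attribute access; a minimal sketch of the consumer side (assuming the package is installed with torch available):

# from transformers.models.sew import SEWConfig  # cheap: configuration module only
# from transformers.models.sew import SEWModel   # triggers the torch-dependent import lazily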
| 693 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1000) -> bool:
    """Miller-Rabin primality test using `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the bitwise ops inside bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
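An example of the decomposition the test relies on, plus deterministic edge cases (primes pass every base, even composites are rejected up front); the asserts stay commented since they need the sibling `binary_exp_mod` module:

# For n = 221: n - 1 = 220 = 55 * 2**2, so d = 55 and exp = 2; a base a is a
# witness against primality unless a**55 % 221 reaches 1 or 220 along the squaring chain.
# assert is_prime_big(2) and is_prime_big(97)
# assert not is_prime_big(1) and not is_prime_big(10)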
| 693 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel to an original TF1-style checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T  # PyTorch Linear weights are transposed relative to TF kernels
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
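# Example invocation (added for illustration; the script file name and paths are
# assumptions, not part of the original source):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint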
| 693 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # placeholder kept from the original file
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __lowerCAmelCase ( self ) -> str:
# with apply_OCR = True
__SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor()
from datasets import load_dataset
__SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/fixtures_docvqa", split="test" )
__SCREAMING_SNAKE_CASE = Image.open(ds[0]["file"] ).convert("RGB" )
__SCREAMING_SNAKE_CASE = image_processing(_a, return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__SCREAMING_SNAKE_CASE = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "โIntroductory", "Remarksโ", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__SCREAMING_SNAKE_CASE = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, _a )
self.assertListEqual(encoding.boxes, _a )
# with apply_OCR = False
__SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=_a )
__SCREAMING_SNAKE_CASE = image_processing(_a, return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
| 693 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size=1, sampling_rate=24_000, padding_value=0.0, chunk_length_s=None, overlap=None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''')
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''')
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''')

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding)
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
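# Minimal usage sketch (added; assumes the 24 kHz defaults above):
#
#   import numpy as np
#   extractor = EncodecFeatureExtractor(chunk_length_s=1.0, overlap=0.01)
#   audio = np.random.randn(24_000).astype(np.float32)  # 1 s of mono audio
#   features = extractor(audio, sampling_rate=24_000, return_tensors="np")
#   print(features["input_values"].shape)  # (1, 1, padded_length)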
| 693 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_snake_case : Dict = '\\n Text data.\n Second line of data.'
_snake_case : List[Any] = 'file'
@pytest.fixture(scope="session" )
def _A ( __snake_case :Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
__SCREAMING_SNAKE_CASE = bytes(__snake_case , "utf-8" )
with zstd.open(__snake_case , "wb" ) as f:
f.write(__snake_case )
return path
@pytest.fixture
def _A ( __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , __snake_case ) , "w" ) as f:
f.write(__snake_case )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def _A ( __snake_case :List[str] , __snake_case :Any , __snake_case :Optional[int] , __snake_case :Optional[Any] , __snake_case :Optional[int] , __snake_case :List[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
__SCREAMING_SNAKE_CASE = input_paths[compression_format]
__SCREAMING_SNAKE_CASE = tmp_path / "cache"
__SCREAMING_SNAKE_CASE = DownloadConfig(cache_dir=__snake_case , extract_compressed_file=__snake_case )
__SCREAMING_SNAKE_CASE = cached_path(__snake_case , download_config=__snake_case )
with open(__snake_case ) as f:
__SCREAMING_SNAKE_CASE = f.read()
with open(__snake_case ) as f:
__SCREAMING_SNAKE_CASE = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def _A ( __snake_case :str , __snake_case :Optional[int] , __snake_case :Tuple , __snake_case :List[Any] , __snake_case :Union[str, Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "custom_cache"
__SCREAMING_SNAKE_CASE = "custom_extracted_dir"
__SCREAMING_SNAKE_CASE = tmp_path / "custom_extracted_path"
if default_extracted:
__SCREAMING_SNAKE_CASE = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , __snake_case )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__snake_case ) )
__SCREAMING_SNAKE_CASE = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__SCREAMING_SNAKE_CASE = xz_file
__SCREAMING_SNAKE_CASE = (
DownloadConfig(extract_compressed_file=__snake_case )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__snake_case )
)
__SCREAMING_SNAKE_CASE = cached_path(__snake_case , download_config=__snake_case )
assert Path(__snake_case ).parent.parts[-2:] == expected
def _A ( __snake_case :List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = str(Path(__snake_case ).resolve() )
assert cached_path(__snake_case ) == text_file
# relative path
__SCREAMING_SNAKE_CASE = str(Path(__snake_case ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__snake_case ) == text_file
def _A ( __snake_case :str ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(__snake_case ):
cached_path(__snake_case )
# relative path
__SCREAMING_SNAKE_CASE = "./__missing_file__.txt"
with pytest.raises(__snake_case ):
cached_path(__snake_case )
def _A ( __snake_case :Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(__snake_case ) as f:
__SCREAMING_SNAKE_CASE = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , __snake_case )
def _A ( ) -> List[Any]:
"""simple docstring"""
with pytest.raises(__snake_case ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __snake_case )
def _A ( __snake_case :Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__snake_case ):
http_get("https://huggingface.co" , temp_file=__snake_case )
with pytest.raises(__snake_case ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __snake_case )
def _A ( __snake_case :str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__snake_case ):
ftp_get("ftp://huggingface.co" , temp_file=__snake_case )
with pytest.raises(__snake_case ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __snake_case )
def _A ( __snake_case :Optional[int] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__snake_case ):
fsspec_get("s3://huggingface.co" , temp_file=__snake_case )
with pytest.raises(__snake_case ):
fsspec_head("s3://huggingface.co" )
| 693 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
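# Usage sketch (added; the checkpoint id below is illustrative, not from this file):
#
#   from diffusers import UNet2DModel, ScoreSdeVeScheduler
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
#   scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
#   pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=2000).images[0]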
| 693 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = True
_snake_case : Optional[Any] = False
if __name__ == "__main__":
_snake_case : int = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
_snake_case : Optional[int] = parser.parse_args()
_snake_case : Tuple = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
_snake_case : List[Any] = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
_snake_case : Optional[Any] = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
_snake_case : Any = reader.read()
_snake_case : str = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
_snake_case : int = UNetaDModel(**config)
else:
_snake_case : int = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
_snake_case : Optional[Any] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_snake_case : Any = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_snake_case : List[Any] = config[key]
del config[key]
_snake_case : Any = [k.replace('UNetRes', '') for k in config['down_block_types']]
_snake_case : List[str] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
_snake_case : Optional[Any] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
_snake_case : Optional[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
_snake_case : Optional[int] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
_snake_case : int = param_value
_snake_case : Tuple = True
if not has_changed:
_snake_case : Optional[Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 693 |
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci numbers that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
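# Quick sanity checks (added; the even Fibonacci numbers <= 100 are 2, 8, 34):
assert solution(10) == 10   # 2 + 8
assert solution(100) == 44  # 2 + 8 + 34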
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 1 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with ``torch.compile()``."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed/compiled containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Introduce a blocking point, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save ``obj`` to ``f``, deferring to ``xm.save`` on TPU and to the local main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased keys), removing them again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable name for ``obj``: its qualified name, plain name, or ``str()`` fallback."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge ``source`` into ``destination`` and return ``destination``."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a local TCP port already has something listening on it."""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
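# Usage sketch for `patch_environment` (added; values are illustrative):
#
#   with patch_environment(cuda_visible_devices="0,1"):
#       assert os.environ["CUDA_VISIBLE_DEVICES"] == "0,1"
#   # on exit the variable is removed from os.environ again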
| 693 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) baseline: compare each element to everything after it."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same O(n^2) idea written with enumerate() and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) solution: scan right-to-left, keeping candidates on a stack."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
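# Worked example (added): all three variants agree on the same input.
assert (
    next_greatest_element_slow([2, 7, 3, 5, 4, 6, 8])
    == next_greatest_element_fast([2, 7, 3, 5, 4, 6, 8])
    == next_greatest_element([2, 7, 3, 5, 4, 6, 8])
    == [7, 8, 5, 6, 6, 8, -1]
)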
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 693 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no equivalent in the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build an ``nn.Linear`` that shares its weights with an embedding layer."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Map fairseq MoE parameter names onto the HF NLLB-MoE naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: the left operand of `"fc2" and ...` is always truthy; the replace()
        # below is a no-op when ".fc2." is absent, so behavior is still correct.
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Shard the per-expert fairseq checkpoints into HF-style weight files plus an index."""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
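# Example invocation (added; the script file name is an assumption):
#   python convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py \
#       --nllb_moe_checkpoint_path /path/to/model_moe_54b/checkpoint_2_300000 \
#       --pytorch_dump_folder_path /path/to/hf-converted-moe-54b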
| 693 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
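# Quick usage sketch (added; the test functions below exercise this in depth):
#
#   ll = LinkedList()
#   ll.insert_tail(1)
#   ll.insert_tail(2)
#   ll.insert_head(0)
#   print(ll)   # 0->1->2
#   ll.reverse()
#   print(ll)   # 2->1->0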
def test_singly_linked_list() -> None:
    """Exercise the basic insert/delete/index operations of LinkedList."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_with_mixed_data() -> None:
    """Check heterogeneous payloads (numbers, strings, Nodes, None) behave correctly."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo of the linked list (also runs the module doctests)."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 693 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
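# Usage note (added): with the lazy structure above, `from transformers import
# MobileViTModel` only triggers the import of `modeling_mobilevit` on first access.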
| 693 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation-set file and a gold-data file."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
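# Example invocation (added; assuming the usual file name of this RAG helper script):
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions \
#       --gold_data_path gold.titles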
| 693 | 1 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
if labels is not None:
# Shift so that tokens < n predict n
__SCREAMING_SNAKE_CASE = hidden[..., :-1, :].contiguous()
__SCREAMING_SNAKE_CASE = labels[..., 1:].contiguous()
__SCREAMING_SNAKE_CASE = hidden.view(-1, hidden.size(-1 ) )
__SCREAMING_SNAKE_CASE = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
__SCREAMING_SNAKE_CASE = hidden.view(-1, hidden.size(-1 ) )
if self.n_clusters == 0:
__SCREAMING_SNAKE_CASE = self._compute_logit(_a, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
if labels is not None:
__SCREAMING_SNAKE_CASE = labels != -1_00
__SCREAMING_SNAKE_CASE = torch.zeros_like(_a, dtype=hidden.dtype, device=hidden.device )
__SCREAMING_SNAKE_CASE = (
-nn.functional.log_softmax(_a, dim=-1 )[mask].gather(1, labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(_a, dim=-1 )
else:
# construct weights and biases
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE = self.out_layers[0].weight[l_idx:r_idx]
__SCREAMING_SNAKE_CASE = self.out_layers[0].bias[l_idx:r_idx]
else:
__SCREAMING_SNAKE_CASE = self.out_layers[i].weight
__SCREAMING_SNAKE_CASE = self.out_layers[i].bias
if i == 0:
__SCREAMING_SNAKE_CASE = torch.cat([weight_i, self.cluster_weight], dim=0 )
__SCREAMING_SNAKE_CASE = torch.cat([bias_i, self.cluster_bias], dim=0 )
weights.append(_a )
biases.append(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = weights[0], biases[0], self.out_projs[0]
__SCREAMING_SNAKE_CASE = self._compute_logit(_a, _a, _a, _a )
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(_a, dim=1 )
if labels is None:
__SCREAMING_SNAKE_CASE = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__SCREAMING_SNAKE_CASE = torch.zeros_like(_a, dtype=hidden.dtype, device=hidden.device )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__SCREAMING_SNAKE_CASE = (labels >= l_idx) & (labels < r_idx)
__SCREAMING_SNAKE_CASE = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__SCREAMING_SNAKE_CASE = labels.index_select(0, _a ) - l_idx
__SCREAMING_SNAKE_CASE = head_logprob.index_select(0, _a )
__SCREAMING_SNAKE_CASE = hidden.index_select(0, _a )
else:
__SCREAMING_SNAKE_CASE = hidden
if i == 0:
if labels is not None:
__SCREAMING_SNAKE_CASE = head_logprob_i.gather(1, target_i[:, None] ).squeeze(1 )
else:
__SCREAMING_SNAKE_CASE = head_logprob[:, : self.cutoffs[0]]
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = weights[i], biases[i], self.out_projs[i]
__SCREAMING_SNAKE_CASE = self._compute_logit(_a, _a, _a, _a )
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(_a, dim=1 )
__SCREAMING_SNAKE_CASE = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__SCREAMING_SNAKE_CASE = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1, target_i[:, None] ).squeeze(1 )
else:
__SCREAMING_SNAKE_CASE = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__SCREAMING_SNAKE_CASE = logprob_i
if labels is not None:
if (hasattr(self, "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0, _a, -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self, hidden):
if self.n_clusters == 0:
__SCREAMING_SNAKE_CASE = self._compute_logit(_a, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
return nn.functional.log_softmax(_a, dim=-1 )
else:
# construct weights and biases
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE = self.out_layers[0].weight[l_idx:r_idx]
__SCREAMING_SNAKE_CASE = self.out_layers[0].bias[l_idx:r_idx]
else:
__SCREAMING_SNAKE_CASE = self.out_layers[i].weight
__SCREAMING_SNAKE_CASE = self.out_layers[i].bias
if i == 0:
__SCREAMING_SNAKE_CASE = torch.cat([weight_i, self.cluster_weight], dim=0 )
__SCREAMING_SNAKE_CASE = torch.cat([bias_i, self.cluster_bias], dim=0 )
weights.append(_a )
biases.append(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = weights[0], biases[0], self.out_projs[0]
__SCREAMING_SNAKE_CASE = self._compute_logit(_a, _a, _a, _a )
__SCREAMING_SNAKE_CASE = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(_a, dim=1 )
__SCREAMING_SNAKE_CASE = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__SCREAMING_SNAKE_CASE = head_logprob[:, : self.cutoffs[0]]
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = weights[i], biases[i], self.out_projs[i]
__SCREAMING_SNAKE_CASE = self._compute_logit(_a, _a, _a, _a )
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(_a, dim=1 )
__SCREAMING_SNAKE_CASE = head_logprob[:, -i] + tail_logprob_i
__SCREAMING_SNAKE_CASE = logprob_i
return out
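# Both passes above compose adaptive-softmax probabilities as
# log p(token) = log p(cluster | head) + log p(token | cluster). A tiny numpy
# illustration of that identity, independent of the module above (a sketch;
# the example numbers are ours):
import numpy as np

head_example = np.log(np.array([0.7, 0.3]) )  # p(head token), p(tail cluster)
tail_example = np.log(np.array([0.6, 0.4]) )  # p(token | tail cluster)
joint = head_example[1] + tail_example        # joint log-probs of the tail tokens
assert np.allclose(np.exp(joint ).sum() + np.exp(head_example[0] ), 1.0 )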
| 693 |
def solution( max_perimeter: int = 10**9 ) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Tuple = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
        'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""unispeech-sat"""
def __init__( self, _a=32, _a=7_68, _a=12, _a=12, _a=30_72, _a="gelu", _a=0.1, _a=0.1, _a=0.1, _a=0.0, _a=0.0, _a=0.1, _a=0.1, _a=0.02, _a=1E-5, _a="group", _a="gelu", _a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12), _a=(5, 2, 2, 2, 2, 2, 2), _a=(10, 3, 3, 3, 3, 2, 2), _a=False, _a=1_28, _a=16, _a=False, _a=True, _a=0.05, _a=10, _a=2, _a=0.0, _a=10, _a=0, _a=3_20, _a=2, _a=0.1, _a=1_00, _a=2_56, _a=2_56, _a=0.1, _a="mean", _a=False, _a=False, _a=2_56, _a=(5_12, 5_12, 5_12, 5_12, 15_00), _a=(5, 3, 3, 1, 1), _a=(1, 2, 3, 1, 1), _a=5_12, _a=0, _a=1, _a=2, _a=5_04, **_a, ) -> List[str]:
super().__init__(**_a, pad_token_id=_a, bos_token_id=_a, eos_token_id=_a )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(_a )
__SCREAMING_SNAKE_CASE = list(_a )
__SCREAMING_SNAKE_CASE = list(_a )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = num_clusters
__SCREAMING_SNAKE_CASE = do_stable_layer_norm
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE = num_codevectors_per_group
__SCREAMING_SNAKE_CASE = num_codevector_groups
__SCREAMING_SNAKE_CASE = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE = num_negatives
__SCREAMING_SNAKE_CASE = codevector_dim
__SCREAMING_SNAKE_CASE = proj_codevector_dim
__SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = list(_a )
__SCREAMING_SNAKE_CASE = list(_a )
__SCREAMING_SNAKE_CASE = list(_a )
__SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul, self.conv_stride, 1 )
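# The property above multiplies the convolutional strides together, i.e. the
# number of raw audio samples that collapse into one frame of hidden states.
# A standalone check with the default strides from __init__ (a sketch):
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul , default_conv_stride , 1 ) == 320  # 5 * 2**6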
| 693 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =None
# Automatically constructed
SCREAMING_SNAKE_CASE__ ="dict"
SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Optional[int]:
return self.pa_type
def __lowerCAmelCase ( self, _a ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_a, _a ):
return {"bytes": None, "path": value}
elif isinstance(_a, _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__SCREAMING_SNAKE_CASE = BytesIO()
sf.write(_a, value["array"], value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    __SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.int16 ).astype(np.float32 ) / 3_27_67
else:
                    __SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.float32 ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__SCREAMING_SNAKE_CASE = token_per_repo_id or {}
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
__SCREAMING_SNAKE_CASE = array.T
if self.mono:
__SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
__SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("bytes" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("path" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
return array_cast(_a, self.pa_type )
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a, "rb" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
return bytes_
__SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
__SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(_a, self.pa_type )
| 693 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : Optional[Any] = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_snake_case : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,)
SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),)
def __lowerCAmelCase ( self, **_a ) -> str:
__SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00}
config.update(**_a )
return config
def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self, _a=0, **_a ) -> int:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self, **_a ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_a, "set_timesteps" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ):
__SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __lowerCAmelCase ( self ) -> str:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.full_loop()
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 693 | 1 |
def split( string: str , separator: str = " " ) -> list:
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
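    # A quick usage check of split above (a sketch):
    assert split("apple#banana#cherry#orange" , separator="#" ) == ["apple", "banana", "cherry", "orange"]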
| 693 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n: int , prec: int = 1000 ) -> bool:
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1=d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
_snake_case : int = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
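# Note: the standard library's three-argument pow(a, d, n) computes the same
# modular exponentiation as bin_exp_mod(a, d, n) and is much faster for large
# n. A spot check of the identity (a sketch):
assert pow(7 , 13 , 31 ) == (7**13) % 31 == 19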
| 693 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =["""pixel_values"""]
def __init__( self, _a = True, _a = None, _a = PILImageResampling.BILINEAR, _a = True, _a = None, _a = True, _a = 1 / 2_55, _a = True, _a = None, _a = None, **_a, ) -> None:
super().__init__(**_a )
__SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_56}
__SCREAMING_SNAKE_CASE = get_size_dict(_a, default_to_square=_a )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
__SCREAMING_SNAKE_CASE = get_size_dict(_a )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self, _a, _a, _a = PILImageResampling.BICUBIC, _a = None, **_a, ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(_a, default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(_a, size=size["shortest_edge"], default_to_square=_a )
return resize(_a, size=_a, resample=_a, data_format=_a, **_a )
def __lowerCAmelCase ( self, _a, _a, _a = None, **_a, ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(_a )
return center_crop(_a, size=(size["height"], size["width"]), data_format=_a, **_a )
def __lowerCAmelCase ( self, _a, _a, _a = None, **_a ) -> np.ndarray:
return rescale(_a, scale=_a, data_format=_a, **_a )
def __lowerCAmelCase ( self, _a, _a, _a, _a = None, **_a, ) -> np.ndarray:
return normalize(_a, mean=_a, std=_a, data_format=_a, **_a )
def __lowerCAmelCase ( self, _a, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = ChannelDimension.FIRST, **_a, ) -> List[Any]:
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(_a, default_to_square=_a )
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE = get_size_dict(_a )
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(_a ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=_a, size=_a, resample=_a ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE = [self.center_crop(image=_a, size=_a ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=_a, scale=_a ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=_a, mean=_a, std=_a ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_a, _a ) for image in images]
__SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=_a, tensor_type=_a )
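# The pipeline above applies resize -> center-crop -> rescale -> normalize;
# the last two steps amount to (pixel / 255 - mean) / std per channel. A
# minimal numpy illustration with the ImageNet-standard defaults assumed in
# __init__ (a sketch):
import numpy as np

pixel = np.array([0.0, 128.0, 255.0] )  # three sample channel values
mean = np.array([0.5, 0.5, 0.5] )       # IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5] )        # IMAGENET_STANDARD_STD
normalized = (pixel / 255 - mean) / std
assert normalized.min() == -1.0 and normalized.max() == 1.0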
| 693 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize: int , sigma: int , theta: int , lambd: int , gamma: int , psi: int ) -> np.ndarray:
    """simple docstring"""
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 1_20, 1_50]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 2_55
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
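    # A small sanity check on the kernel builder (a sketch): the even ksize is
    # bumped to odd, and at the center px = py = 0 the weight reduces to
    # exp(0) * cos(psi), i.e. exactly 1 for psi = 0.
    kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
    assert kernel.shape == (11, 11)
    assert abs(kernel[5, 5] - 1.0) < 1e-9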
| 693 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots( a: int , b: int , c: int ) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    """simple docstring"""
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(f'''The solutions are: {solution_1} and {solution_2}''' )
if __name__ == "__main__":
main()
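    # Two worked examples against quadratic_roots above (a sketch): a
    # factorable real case and a purely complex one.
    assert quadratic_roots(1 , -3 , 2 ) == (2.0 , 1.0 )  # x**2 - 3x + 2 = (x - 1)(x - 2)
    assert quadratic_roots(1 , 0 , 1 ) == (1j , -1j )    # x**2 + 1 = 0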
| 693 |
def sylvester( number: int ) -> int:
    """simple docstring"""
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _A ( __snake_case :dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def _A ( __snake_case :np.ndarray , __snake_case :np.ndarray , __snake_case :np.ndarray ) -> np.ndarray:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__snake_case , __snake_case )
# Predict target for test data
__SCREAMING_SNAKE_CASE = xgb.predict(__snake_case )
__SCREAMING_SNAKE_CASE = predictions.reshape(len(__snake_case ) , 1 )
return predictions
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = fetch_california_housing()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data_handling(__snake_case )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = train_test_split(
__snake_case , __snake_case , test_size=0.2_5 , random_state=1 )
__SCREAMING_SNAKE_CASE = xgboost(__snake_case , __snake_case , __snake_case )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(__snake_case , __snake_case )}''' )
print(f'''Mean Square Error : {mean_squared_error(__snake_case , __snake_case )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
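# If a root-mean-squared error is wanted alongside the MAE/MSE reported above,
# it is just the square root of the MSE (a small helper sketch; the name is
# ours):
def rmse( y_true: np.ndarray , y_pred: np.ndarray ) -> float:
    return float(np.sqrt(mean_squared_error(y_true , y_pred ) ) )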
| 693 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 693 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : str = {
'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""pix2struct_text_model"""
SCREAMING_SNAKE_CASE__ =["""past_key_values"""]
SCREAMING_SNAKE_CASE__ ={
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self, _a=5_02_44, _a=7_68, _a=64, _a=20_48, _a=12, _a=12, _a=32, _a=1_28, _a=0.1, _a=1E-6, _a=1.0, _a="gelu_new", _a=0, _a=False, _a=0, _a=1, _a=False, _a=True, **_a, ) -> str:
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = d_kv
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = num_layers
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = relative_attention_max_distance
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
# for backwards compatibility
__SCREAMING_SNAKE_CASE = dense_act_fn
super().__init__(
pad_token_id=_a, eos_token_id=_a, decoder_start_token_id=_a, tie_word_embeddings=_a, is_decoder=_a, **_a, )
@classmethod
def __lowerCAmelCase ( cls, _a, **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cls.get_config_dict(_a, **_a )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
__SCREAMING_SNAKE_CASE = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a, **_a )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""pix2struct_vision_model"""
def __init__( self, _a=7_68, _a=7_68, _a=20_48, _a=64, _a=12, _a=12, _a="gelu_new", _a=1E-6, _a=0.0, _a=0.0, _a=1E-1_0, _a=1.0, _a=40_96, _a=32, _a=1_28, **_a, ) -> Optional[int]:
super().__init__(**_a )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = patch_embed_hidden_size
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = dense_act_fn
__SCREAMING_SNAKE_CASE = seq_len
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = relative_attention_max_distance
__SCREAMING_SNAKE_CASE = d_kv
@classmethod
def __lowerCAmelCase ( cls, _a, **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cls.get_config_dict(_a, **_a )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
__SCREAMING_SNAKE_CASE = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a, **_a )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""pix2struct"""
SCREAMING_SNAKE_CASE__ =True
def __init__( self, _a=None, _a=None, _a=1.0, _a=0.02, _a=False, _a=False, _a=True, **_a, ) -> Any:
super().__init__(tie_word_embeddings=_a, is_encoder_decoder=_a, **_a )
if text_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        __SCREAMING_SNAKE_CASE = Pix2StructTextConfig(**_a )
        __SCREAMING_SNAKE_CASE = Pix2StructVisionConfig(**_a )
__SCREAMING_SNAKE_CASE = self.text_config.decoder_start_token_id
__SCREAMING_SNAKE_CASE = self.text_config.pad_token_id
__SCREAMING_SNAKE_CASE = self.text_config.eos_token_id
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = self.initializer_range
__SCREAMING_SNAKE_CASE = self.initializer_range
__SCREAMING_SNAKE_CASE = is_vqa
@classmethod
def __lowerCAmelCase ( cls, _a, _a, **_a ) -> Any:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.text_config.to_dict()
__SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 693 |
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
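    # A four-leaf tree checks the alternating max/min layers by hand
    # (a sketch): max(min(3, 5), min(2, 9)) == max(3, 2) == 3.
    assert minimax(0 , 0 , True , [3, 5, 2, 9] , math.log(4 , 2 ) ) == 3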
| 693 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""dandelin/vilt-b32-finetuned-vqa"""
SCREAMING_SNAKE_CASE__ =(
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
SCREAMING_SNAKE_CASE__ ="""image_qa"""
SCREAMING_SNAKE_CASE__ =AutoProcessor
SCREAMING_SNAKE_CASE__ =AutoModelForVisualQuestionAnswering
SCREAMING_SNAKE_CASE__ =["""image""", """text"""]
SCREAMING_SNAKE_CASE__ =["""text"""]
def __init__( self, *_a, **_a ) -> Optional[int]:
requires_backends(self, ["vision"] )
super().__init__(*_a, **_a )
def __lowerCAmelCase ( self, _a, _a ) -> List[str]:
return self.pre_processor(_a, _a, return_tensors="pt" )
def __lowerCAmelCase ( self, _a ) -> str:
with torch.no_grad():
return self.model(**_a ).logits
def __lowerCAmelCase ( self, _a ) -> Tuple:
__SCREAMING_SNAKE_CASE = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 693 |
def base16_encode( data: bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ) -> bytes:
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
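    # Round-trip sanity check (a sketch): "Hello World!" in uppercase hex.
    assert base16_encode(b"Hello World!" ) == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421" ) == b"Hello World!"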
| 693 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Dict = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""blenderbot-small"""
SCREAMING_SNAKE_CASE__ =["""past_key_values"""]
SCREAMING_SNAKE_CASE__ ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self, _a=5_02_65, _a=5_12, _a=8, _a=20_48, _a=16, _a=8, _a=20_48, _a=16, _a=0.0, _a=0.0, _a=True, _a=True, _a="gelu", _a=5_12, _a=0.1, _a=0.0, _a=0.0, _a=0.02, _a=1, _a=False, _a=0, _a=1, _a=2, _a=2, **_a, ) -> List[str]:
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_a, bos_token_id=_a, eos_token_id=_a, is_encoder_decoder=_a, decoder_start_token_id=_a, forced_eos_token_id=_a, **_a, )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE = {0: "batch"}
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "decoder_sequence"}
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_a, direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
for i in range(_a ):
__SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
__SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
else:
__SCREAMING_SNAKE_CASE = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super().outputs
else:
__SCREAMING_SNAKE_CASE = super(_a, self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
for i in range(_a ):
__SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
__SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __lowerCAmelCase ( self, _a, _a = -1, _a = -1, _a = False, _a = None, ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a, _a, _a, _a, _a )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a, _a, _a, _a, _a )
__SCREAMING_SNAKE_CASE = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE = dict(**_a, **_a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
__SCREAMING_SNAKE_CASE = common_inputs["decoder_input_ids"].shape[1]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_a, _a )], dim=1 )
__SCREAMING_SNAKE_CASE = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE = min(_a, _a )
__SCREAMING_SNAKE_CASE = max(_a, _a ) - min_num_layers
__SCREAMING_SNAKE_CASE = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_a ):
common_inputs["past_key_values"].append(
(
torch.zeros(_a ),
torch.zeros(_a ),
torch.zeros(_a ),
torch.zeros(_a ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_a, _a ):
common_inputs["past_key_values"].append((torch.zeros(_a ), torch.zeros(_a )) )
return common_inputs
def __lowerCAmelCase ( self, _a, _a = -1, _a = -1, _a = False, _a = None, ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a, _a, _a, _a, _a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE = seqlen + 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = common_inputs["attention_mask"].dtype
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["attention_mask"], torch.ones(_a, _a, dtype=_a )], dim=1 )
__SCREAMING_SNAKE_CASE = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(_a )
]
return common_inputs
def __lowerCAmelCase ( self, _a, _a = -1, _a = -1, _a = False, _a = None, ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
_a, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(_a )
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
_a, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_a )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE = dict(tokenizer(_a, return_tensors=_a ) )
return common_inputs
def __lowerCAmelCase ( self, _a, _a = -1, _a = -1, _a = False, _a = None, ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_a, batch_size=_a, seq_length=_a, is_pair=_a, framework=_a )
elif self.task == "causal-lm":
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm(
_a, batch_size=_a, seq_length=_a, is_pair=_a, framework=_a )
else:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a, batch_size=_a, seq_length=_a, is_pair=_a, framework=_a )
return common_inputs
def __lowerCAmelCase ( self, _a, _a, _a, _a ) -> int:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(_a, _a, _a, _a )
else:
__SCREAMING_SNAKE_CASE = super(_a, self )._flatten_past_key_values_(
_a, _a, _a, _a )
| 693 |
from functools import lru_cache
def unique_prime_factors( n: int ) -> set:
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num: int ) -> int:
    """simple docstring"""
    return len(unique_prime_factors(num ) )
def equality( iterable: list ) -> bool:
    """simple docstring"""
    return len(set(iterable ) ) in (0, 1)
def run( n: int ) -> list:
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n: int = 4 ) -> int:
    """simple docstring"""
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
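    # Project Euler 47's worked examples hold (a sketch): 14, 15 are the first
    # pair with two distinct prime factors each, and 644, 645, 646 the first
    # triple with three each (644 = 2**2 * 7 * 23, 645 = 3 * 5 * 43,
    # 646 = 2 * 17 * 19).
    assert run(2 )[0] == 14 and run(3 )[0] == 644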
| 693 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _A ( ) -> str:
"""simple docstring"""
raise RuntimeError("CUDA out of memory." )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self ) -> List[str]:
super().__init__()
        self.linear1 = nn.Linear(3, 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4, 5 )
    def __lowerCAmelCase ( self, _a ) -> Dict:
        return self.linear2(self.batchnorm(self.linear1(_a ) ) )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(_a ):
nonlocal batch_sizes
batch_sizes.append(_a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_a, [1_28, 64, 32, 16, 8] )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(_a, _a ):
nonlocal batch_sizes
batch_sizes.append(_a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = mock_training_loop_function("hello" )
self.assertListEqual(_a, [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga], [8, "hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_a ):
pass
with self.assertRaises(_a ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0] )
def __lowerCAmelCase ( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_a ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_a ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0] )
def __lowerCAmelCase ( self ) -> int:
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(_a, _a, _a ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(_a ) as cm:
mock_training_loop_function(1_28, "hello", "world" )
self.assertIn("Batch size was passed into `f`", cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_a ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(_a ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!", cm.exception.args[0] )
@require_cuda
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = torch.cuda.memory_allocated()
__SCREAMING_SNAKE_CASE = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated(), _a )
__SCREAMING_SNAKE_CASE = release_memory(_a )
self.assertEqual(torch.cuda.memory_allocated(), _a )
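# A minimal sketch (outside the test suite) of how the decorator is meant to be
# used in real training code: the decorated function receives the current batch
# size as its first argument and is retried with half the batch size on OOM.
# `build_dataloader` and `train_one_epoch` are hypothetical helpers, not part
# of accelerate:
#
#   @find_executable_batch_size(starting_batch_size=256)
#   def training_function(batch_size):
#       dataloader = build_dataloader(batch_size)
#       train_one_epoch(model, dataloader)
#
#   training_function()  # called without the batch_size argument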
| 693 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Load a short cooking video on which we will verify the converted model."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
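# Example invocation (hedged: the script file name and output path are
# illustrative; the checkpoint URL must be a direct Google Drive download link,
# like the default above):
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<google-drive-direct-download-link>" \
#       --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base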
| 693 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) replacement pattern in order to a TF key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    """Load all variables of a TF checkpoint into a {name: numpy array} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
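# A quick illustration of the key renaming above (hypothetical TF key, traced
# by hand through DECODER_PATTERNS):
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"
# INIT_COMMON handles the generic "/"->"." / "kernel"->"weight" style rewrites,
# the decoder-specific patterns map attention sub-modules, and END_COMMON
# covers the feed-forward layers.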
| 693 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 693 | 1 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 693 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
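# Worked check: 220 and 284 form the classic amicable pair, so the helpers above
# should give sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, and
# both numbers are counted by solution(); for the default limit of 10000 the
# result is 31626 (Project Euler problem 21).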
| 693 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` independent Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
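# Worked arithmetic for the example above: C(4, 2) = 4!/(2! * 2!) = 6,
# 0.75**2 * 0.25**2 = 0.03515625, and 6 * 0.03515625 = 0.2109375,
# so the script should print a probability of 0.2109375.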
| 693 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
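# Worked example (values chosen by hand): 3 mol of an ideal gas at 300 K in a
# 0.82 L vessel gives P = nRT / V = (3 * 0.0821 * 300) / 0.82 ≈ 90.1 atm, so
# moles_to_pressure(0.82, 3, 300) should return 90 after rounding.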
| 693 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tion': 4} => {'le': 5, 'tion</w>': 4}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
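# Traced by hand on a tiny hypothetical vocab:
#   rewrite_dict_keys({"le@@": 5, "tion": 4, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#   => {"le": 5, "tion</w>": 4, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# i.e. BPE continuation pieces lose their "@@" marker, full words gain "</w>",
# and the four special tokens are restored without the "</w>" suffix.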
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
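# Example invocation (hedged: the checkpoint layout is whatever fairseq's
# wmt19 release uses; the paths here are illustrative):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path ./wmt19-ru-en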
| 693 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
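# To run just this module from a transformers checkout (the path below is the
# usual location for these tests, but treat it as an assumption):
#   RUN_SLOW=1 python -m pytest tests/models/umt5/test_modeling_umt5.py -rA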
| 693 | 1 |
def compute_ap(l):  # noqa: E741
    """Print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
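# For the sample graph above (worked out by hand): removing vertex 2
# disconnects {0, 1} from the rest, removing 3 isolates 4, and removing 5
# cuts off the 6-7-8 cycle, so the script is expected to print:
#   2
#   3
#   5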
| 693 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a pytorch BertModel into a TensorFlow checkpoint.

    Args:
        model: BertModel pytorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
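# Example invocation (the script file name and paths are illustrative):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert-base-uncased/pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt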
| 693 | 1 |