import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)

        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
    import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
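# A minimal usage sketch for the caching descriptor above (the Circle class is
# hypothetical, added only for illustration): the first attribute access runs
# fget and stores the result under "__cached_<name>"; later accesses reuse it.
#
#     class Circle:
#         def __init__(self, radius):
#             self.radius = radius
#
#         @cached_property
#         def area(self):
#             print("computing area...")
#             return 3.14159 * self.radius**2
#
#     c = Circle(2.0)
#     c.area  # prints "computing area..." and caches the value
#     c.area  # served from the cache, no recomputation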
def strtobool(val):
    """
    Convert a string representation of truth to 1 (true) or 0 (false). Raises ValueError if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """
    Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """
    Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """
    Tests if `x` is a tensorflow symbolic tensor or not (i.e. not eager). Safe to call even if tensorflow is not
    installed.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
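# A short illustration of the recursive conversion above (plain NumPy input,
# hypothetical values): framework tensors anywhere inside nested dicts/lists
# are converted to plain Python containers.
#
#     to_py_obj({"logits": np.array([[1, 2], [3, 4]])})
#     # -> {"logits": [[1, 2], [3, 4]]}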
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
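# Minimal behavior sketch for ModelOutput (the ToyOutput dataclass is
# hypothetical, for illustration only): a subclass is simultaneously
# attribute-, key- and index-addressable, and `None` fields are dropped
# from the dict/tuple views.
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class ToyOutput(ModelOutput):
#         logits: Optional[list] = None
#         hidden_states: Optional[list] = None
#
#     out = ToyOutput(logits=[1, 2, 3])
#     out.logits == out["logits"] == out[0]  # all equal
#     out.to_tuple()                         # ([1, 2, 3],) -- hidden_states omitted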
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in tokenizer calls. Useful for tab-completion in an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in tokenizer calls. Useful for tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
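# Usage sketch (hypothetical file names): enter several context managers at
# once without deeply nested `with` blocks; the ExitStack unwinds all of them
# on exit, even if an exception is raised inside the block.
#
#     with ContextManagers([open("a.txt"), open("b.txt")]):
#         ...  # both files are open here, and both are closed on exit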
def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
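# Example of the flattening behavior (values are illustrative):
#
#     flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     # -> {"a": 1, "b.c": 2, "b.d.e": 3}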
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
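# Dispatch sketch with NumPy inputs (the torch/TF/JAX branches behave
# analogously; shapes below are illustrative):
#
#     x = np.zeros((2, 3))
#     transpose(x).shape               # (3, 2)
#     reshape(x, (3, 2)).shape         # (3, 2)
#     squeeze(np.zeros((1, 2))).shape  # (2,)
#     expand_dims(x, 0).shape          # (1, 2, 3)
#     tensor_size(x)                   # 6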
def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
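# Sketch of the MRO walk above (hypothetical torch model class): the class's
# own module is "__main__" and does not match, but torch.nn.Module's module
# starts with "torch", so the loop returns "pt".
#
#     import torch
#
#     class TinyModel(torch.nn.Module):
#         pass
#
#     infer_framework(TinyModel)  # -> "pt"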
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element by scanning all the
    elements to its right: O(n^2).
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow() but uses enumerate() and slicing instead
    of index arithmetic. Still O(n^2).
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element using a monotonic
    stack, scanning from right to left: O(n).
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
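# Why the stack version is O(n): scanning right-to-left, the stack holds a
# decreasing sequence of NGE candidates, and every element is pushed once and
# popped at most once. Worked example: for [2, 1, 3] the result is [3, 3, -1].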
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_snake_case : Optional[Any] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always
        # return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
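# Input sketch: each record in the source JSON is expected to look roughly like
# (simplified, hypothetical field subset):
#
#     {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, ...]}
#
# yielding one question per line in evaluation_set and the tab-joined positive
# context titles on the matching line of gold_data_path.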
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
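# Usage sketch (hypothetical program string; real callers assemble
# check_program from a prompt, a model completion and a test suite):
#
#     program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#     check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
#     # -> {"task_id": "demo/0", "passed": True, "result": "passed", "completion_id": 0}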
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    This disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.)

    WARNING: This function is NOT a security sandbox. Untrusted code, including
    model-generated code, should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
def solution(max_perimeter: int = 10**9) -> int:
    """
    Accumulates the sum of the triangle perimeters generated by the recurrence
    below, stopping once a perimeter exceeds max_perimeter.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class __lowercase ( __lowerCamelCase ):
snake_case_ = """lilt"""
def __init__( self : Union[str, Any] ,A : Any=30_522 ,A : List[Any]=768 ,A : Optional[Any]=12 ,A : Any=12 ,A : str=3_072 ,A : Any="gelu" ,A : Optional[int]=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=512 ,A : Optional[int]=2 ,A : List[str]=0.0_2 ,A : str=1e-12 ,A : Union[str, Any]=0 ,A : Optional[int]="absolute" ,A : Optional[Any]=None ,A : int=4 ,A : str=1_024 ,**A : int ,):
'''simple docstring'''
super().__init__(pad_token_id=A ,**A )
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : List[str] = layer_norm_eps
UpperCAmelCase__ : Tuple = position_embedding_type
UpperCAmelCase__ : List[str] = classifier_dropout
UpperCAmelCase__ : Optional[int] = channel_shrink_ratio
UpperCAmelCase__ : str = max_ad_position_embeddings
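# Usage sketch (standard PretrainedConfig pattern):
#
#     config = LiltConfig()                    # defaults mirror lilt-roberta-en-base
#     small = LiltConfig(num_hidden_layers=4)  # override any hyperparameter
#     small.to_dict()["channel_shrink_ratio"]  # -> 4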
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Audio:
    """Audio `Feature` to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ), type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ), type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
        return array_cast(storage, self.pa_type )
    def embed_storage(self, storage ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path, "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type )
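# --- Illustrative usage (added; a minimal sketch assuming the Hugging Face `datasets` API) ---
# Attach this Audio feature to a dataset column so that files decode lazily to numpy
# arrays on access. The file path below is a placeholder.
#
#   from datasets import Dataset, Audio
#
#   ds = Dataset.from_dict({"audio": ["path/to/sample.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000, mono=True))
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}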
| 693 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels ) -> float:
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels ) -> dict:
    acc = simple_accuracy(preds, labels )
    f1 = float(f1_score(y_true=labels, y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels ) -> dict:
    pearson_corr = float(pearsonr(preds, labels )[0] )
    spearman_corr = float(spearmanr(preds, labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
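# Worked example (added for illustration): for preds = [0, 1, 1, 0] and labels = [0, 1, 0, 0],
# simple_accuracy gives 3/4 = 0.75, while f1_score gives 2 * P * R / (P + R)
# with precision P = 1/2 and recall R = 1/1, i.e. f1 = 2 * 0.5 * 1.0 / 1.5 ≈ 0.667.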
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric ):
    def _info(self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 66 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs ):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs(self, time_step=0, **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self ):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        return sample
    def test_step_shape(self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs ).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs ).prev_sample
            self.assertEqual(output_0.shape, sample.shape )
            self.assertEqual(output_0.shape, output_1.shape )
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs ).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs ).prev_sample
            self.assertEqual(output_0.shape, sample.shape )
            self.assertEqual(output_0.shape, output_1.shape )
    def test_timesteps(self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None )
    def test_inference_steps(self ):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None )
    def test_full_loop_no_noise(self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
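# --- Illustrative usage (added; a minimal sketch, not part of the original test file) ---
# How IPNDMScheduler is typically driven outside of tests; `model` stands in for any
# noise-predicting UNet and is a placeholder here.
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample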
| 693 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray ) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray, v: np.ndarray ) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix `a`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot, np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v ) == float(3 )
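# Note (added): for a Hermitian matrix A, the Rayleigh quotient R(A, v) = (v* A v) / (v* v)
# is always real and bounded by the extreme eigenvalues: lambda_min <= R(A, v) <= lambda_max.
# A quick numeric cross-check, assuming the functions above:
#
#   w = np.linalg.eigvalsh(np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]]))
#   # rayleigh_quotient(a, v) for any nonzero v lies within [w.min(), w.max()]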
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 67 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000 ) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1 )
        b = bin_exp_mod(a, d, n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
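# Note (added): Miller-Rabin is probabilistic. A composite n passes a single random
# witness with probability at most 1/4, so `prec` rounds bound the error by 4**(-prec).
# For example, 561 (a Carmichael number that fools the plain Fermat test) is reported
# as composite with overwhelming probability.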
| 693 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """Node of a binary tree holding an integer value."""
    def __init__(self, value: int ) -> None:
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum:
    """Sums all node values of a binary tree via depth-first search."""
    def __init__(self, tree: Node ) -> None:
        self.tree = tree
    def depth_first_search(self, node: Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__(self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
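if __name__ == "__main__":
    # Added usage example (illustrative, not part of the original module):
    # build the tree 10 <- (5, 15) and sum it via the iterator defined above.
    example_root = Node(10 )
    example_root.left = Node(5 )
    example_root.right = Node(15 )
    print(next(iter(BinaryTreeNodeSum(example_root ) ) ) )  # prints 30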
| 68 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int ) -> np.ndarray:
    """Build a `ksize` x `ksize` Gabor kernel for the given filter parameters."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
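# For reference (added): each kernel entry evaluates the real Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where x' = x*cos(theta) + y*sin(theta) and y' = -x*sin(theta) + y*cos(theta)
# are the pixel offsets rotated by `theta` degrees about the kernel center.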
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
    def dummy_uncond_unet(self ):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test" , subfolder="test_unet" , )
        return unet
@property
    def dummy_cond_unet(self ):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
        return unet
    def get_dummy_components(self, class_cond=False ):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = ConsistencyModelPipeline(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components(class_cond=a_ )
__snake_case = ConsistencyModelPipeline(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = 0
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = ConsistencyModelPipeline(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = 1
__snake_case = None
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components(class_cond=a_ )
__snake_case = ConsistencyModelPipeline(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = 1
__snake_case = None
__snake_case = 0
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase ):
    def tearDown(self ):
        # Free GPU memory between tests.
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64) ):
        generator = torch.manual_seed(seed )
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape )
            inputs["latents"] = latents
        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64) ):
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype )
        return latents
    def test_consistency_model_cd_multistep(self ):
"""simple docstring"""
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=a_ , scheduler=a_ )
pipe.to(torch_device=a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_inputs()
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_consistency_model_cd_onestep(self ):
"""simple docstring"""
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=a_ , scheduler=a_ )
pipe.to(torch_device=a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_inputs()
__snake_case = 1
__snake_case = None
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self ):
"""simple docstring"""
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=a_ , scheduler=a_ )
pipe.to(torch_device=a_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_inputs(get_fixed_latents=a_ , device=a_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a_ , enable_math=a_ , enable_mem_efficient=a_ ):
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self ):
"""simple docstring"""
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=a_ , scheduler=a_ )
pipe.to(torch_device=a_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_inputs(get_fixed_latents=a_ , device=a_ )
__snake_case = 1
__snake_case = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a_ , enable_math=a_ , enable_mem_efficient=a_ ):
__snake_case = pipe(**a_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
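# --- Illustrative usage (added; a minimal sketch mirroring what the slow tests exercise) ---
# The checkpoint and scheduler settings match those used above.
#
#   unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#   image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]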
| 69 |
def sylvester(number: int ) -> int:
    """Return the `number`-th term of Sylvester's sequence (1-indexed)."""
    assert isinstance(number, int ), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
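# Note (added): Sylvester's sequence satisfies a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1,
# which is exactly `lower * upper + 1` above. The first terms are 2, 3, 7, 43, 1807, ...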
| 693 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 70 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs ):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase ):
@require_torch
    def test_small_model_pt(self ):
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
    def test_small_model_tf(self ):
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
    def test_large_model_pt(self ):
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
    def test_large_model_tf(self ):
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
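# --- Illustrative usage (added; a minimal sketch of the pipeline these tests cover) ---
# The image path is a placeholder.
#
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#   # -> list of {"score": float, "label": str}, sorted by score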
| 693 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
    def __call__(self, conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng ):
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape ,dtype=jnp.float32 )
        timesteps = jnp.ones((1,) ,dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape ,dtype=jnp.float32 )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs ,sample ,timesteps ,encoder_hidden_states ,controlnet_cond )["params"]
    def setup(self ):
UpperCAmelCase_ : Tuple = self.block_out_channels
UpperCAmelCase_ : Union[str, Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase_ : Dict = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase_ : str = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
UpperCAmelCase_ : Dict = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
UpperCAmelCase_ : List[Any] = FlaxTimestepEmbedding(_snake_case ,dtype=self.dtype )
UpperCAmelCase_ : int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
UpperCAmelCase_ : Any = self.only_cross_attention
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[str] = block_out_channels[0]
UpperCAmelCase_ : Union[str, Any] = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(_snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase_ : Tuple = output_channel
UpperCAmelCase_ : List[Any] = block_out_channels[i]
UpperCAmelCase_ : int = i == len(_snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase_ : int = FlaxCrossAttnDownBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
UpperCAmelCase_ : List[Any] = FlaxDownBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(_snake_case )
for _ in range(self.layers_per_block ):
UpperCAmelCase_ : List[Any] = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(_snake_case )
if not is_final_block:
UpperCAmelCase_ : int = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(_snake_case )
UpperCAmelCase_ : int = down_blocks
UpperCAmelCase_ : Union[str, Any] = controlnet_down_blocks
# mid
UpperCAmelCase_ : int = block_out_channels[-1]
UpperCAmelCase_ : Tuple = FlaxUNetMidBlockaDCrossAttn(
in_channels=_snake_case ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
UpperCAmelCase_ : List[str] = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
    def __call__(self ,sample ,timesteps ,encoder_hidden_states ,controlnet_cond ,conditioning_scale = 1.0 ,return_dict = True ,train = False ,):
UpperCAmelCase_ : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCAmelCase_ : Union[str, Any] = jnp.flip(_snake_case ,axis=1 )
# 1. time
if not isinstance(_snake_case ,jnp.ndarray ):
UpperCAmelCase_ : Optional[int] = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(_snake_case ,jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : str = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase_ : Optional[int] = jnp.expand_dims(_snake_case ,0 )
UpperCAmelCase_ : str = self.time_proj(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.time_embedding(_snake_case )
# 2. pre-process
UpperCAmelCase_ : Union[str, Any] = jnp.transpose(_snake_case ,(0, 2, 3, 1) )
UpperCAmelCase_ : List[str] = self.conv_in(_snake_case )
UpperCAmelCase_ : Tuple = jnp.transpose(_snake_case ,(0, 2, 3, 1) )
UpperCAmelCase_ : Optional[int] = self.controlnet_cond_embedding(_snake_case )
sample += controlnet_cond
# 3. down
UpperCAmelCase_ : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = down_block(_snake_case ,_snake_case ,_snake_case ,deterministic=not train )
else:
UpperCAmelCase_ , UpperCAmelCase_ : str = down_block(_snake_case ,_snake_case ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCAmelCase_ : str = self.mid_block(_snake_case ,_snake_case ,_snake_case ,deterministic=not train )
# 5. contronet blocks
UpperCAmelCase_ : int = ()
for down_block_res_sample, controlnet_block in zip(_snake_case ,self.controlnet_down_blocks ):
UpperCAmelCase_ : List[str] = controlnet_block(_snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase_ : str = controlnet_down_block_res_samples
UpperCAmelCase_ : List[Any] = self.controlnet_mid_block(_snake_case )
# 6. scaling
UpperCAmelCase_ : Any = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_snake_case ,mid_block_res_sample=_snake_case )
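# --- Illustrative usage (added; a minimal sketch, input shapes follow `init_weights` above) ---
# `sample`, `timesteps`, `encoder_hidden_states`, and `controlnet_cond` are placeholders.
#
#   model = FlaxControlNetModel()
#   params = model.init_weights(rng=jax.random.PRNGKey(0))
#   down_res, mid_res = model.apply({"params": params}, sample, timesteps,
#                                   encoder_hidden_states, controlnet_cond, return_dict=False)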
| 71 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float ) -> int:
    """Evaluate a perfect binary game tree with alternating max/min levels."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
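# Worked example (added): for the scores above, leaf pairs combine under alternating
# max/min levels: max(90, 23), max(6, 33), max(21, 65), max(123, 34423) -> 90, 33, 65, 34423;
# then min(90, 33) = 33 and min(65, 34423) = 65; the root maximizer picks max(33, 65) = 65.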
| 693 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
lowercase =parent
lowercase =batch_size
lowercase =seq_length
lowercase =is_training
lowercase =use_input_mask
lowercase =use_token_type_ids
lowercase =use_labels
lowercase =vocab_size
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =intermediate_size
lowercase =hidden_act
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =max_position_embeddings
lowercase =type_vocab_size
lowercase =type_sequence_label_size
lowercase =initializer_range
lowercase =num_labels
lowercase =num_choices
lowercase =scope
    def prepare_config_and_inputs(self ):
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =None
if self.use_input_mask:
lowercase =random_attention_mask([self.batch_size, self.seq_length] )
lowercase =None
if self.use_token_type_ids:
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase =None
lowercase =None
lowercase =None
if self.use_labels:
lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase =ids_tensor([self.batch_size] , self.num_choices )
lowercase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , use_stable_embedding=snake_case_ , )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =OpenLlamaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ )
lowercase =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =True
lowercase =OpenLlamaModel(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
lowercase =model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =OpenLlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =True
lowercase =True
lowercase =OpenLlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
lowercase =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase =torch.cat([input_mask, next_mask] , dim=-1 )
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
# select random slice
lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase =output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCamelCase__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
    def setUp(self ):
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase =type
self.model_tester.create_and_check_model(*snake_case_ )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =3
lowercase =input_dict['''input_ids''']
lowercase =input_ids.ne(1 ).to(snake_case_ )
lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase =OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =3
lowercase ='''single_label_classification'''
lowercase =input_dict['''input_ids''']
lowercase =input_ids.ne(1 ).to(snake_case_ )
lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase =OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =3
lowercase ='''multi_label_classification'''
lowercase =input_dict['''input_ids''']
lowercase =input_ids.ne(1 ).to(snake_case_ )
lowercase =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase =OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def _A( self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling(self , scaling_type ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =ids_tensor([1, 10] , config.vocab_size )
lowercase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase =OpenLlamaModel(snake_case_ )
original_model.to(snake_case_ )
original_model.eval()
lowercase =original_model(snake_case_ ).last_hidden_state
lowercase =original_model(snake_case_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase ={'''type''': scaling_type, '''factor''': 10.0}
lowercase =OpenLlamaModel(snake_case_ )
scaled_model.to(snake_case_ )
scaled_model.eval()
lowercase =scaled_model(snake_case_ ).last_hidden_state
lowercase =scaled_model(snake_case_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
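# Note (added): "linear" RoPE scaling divides positions by the factor and therefore changes
# short-input outputs immediately, while "dynamic" NTK scaling only kicks in once the input
# exceeds the original max_position_embeddings — which is exactly what the allclose checks
# above assert for the short and long sequences respectively.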
| 72 |
def base16_encode(data: bytes ) -> str:
    """Convert raw bytes to an uppercase base16 (hex) string."""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    """Convert an uppercase base16 (hex) string back to raw bytes."""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
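if __name__ == "__main__":
    # Added usage example (illustrative, not part of the original module):
    encoded = base16_encode(b"Hello" )  # '48656C6C6F'
    assert base16_decode(encoded ) == b"Hello"
    print(encoded )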
| 693 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = '<s>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<unk>')
self.assertEqual(vocab_keys[1] , '<s>')
self.assertEqual(vocab_keys[-1] , '[MASK]')
self.assertEqual(len(a) , 1004)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(a)
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test')
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self) -> BigBirdTokenizer:
        return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
    @slow
    def test_tokenization_base_easy_symbols(self) -> None:
        symbols = 'Hello World!'
        original_tokenizer_encodings = [65, 1_8536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self) -> None:
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self) -> None:
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=False)
        config = BigBirdConfig(attention_type='original_full')
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self) -> None:
        tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
        decoded_text = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids)
        self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]')
    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE, model_name='google/bigbird-roberta-base', revision='215c99f1600e06f83acce68422f2035b2b5c3510', )
| 73 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of ``n``."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(n: int) -> int:
    """Memoized count of the unique prime factors of ``n``."""
    return len(unique_prime_factors(n))
def equality(iterable: list) -> bool:
    """True when every element of the list is identical (or the list is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first ``n`` consecutive integers with ``n`` unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Return the first member of the first run found by ``run(n)``."""
    results = run(n)
    return results[0] if len(results) else None
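# Hedged sanity check (added for illustration, not part of the original solution):
# the first pair of consecutive integers with two distinct prime factors each is
# (14, 15), since 14 = 2 * 7 and 15 = 3 * 5.
def _demo_run() -> None:
    assert run(2) == [14, 15]
    assert solution(2) == 14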
if __name__ == "__main__":
print(solution())
| 693 | 0 |
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Return the least cuboid size M such that more than ``limit`` cuboids up to
    M x M x M have an integer shortest surface path (Project Euler 86)."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
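# Illustrative check (an addition, not part of the original solution): for the
# classic 6 x 5 x 3 cuboid from the problem statement, the shortest surface path
# is sqrt((5 + 3) ** 2 + 6 ** 2) = 10, which is exactly the integrality test above.
def _demo_shortest_path() -> None:
    assert sqrt((5 + 3) ** 2 + 6 ** 2).is_integer()
    assert sqrt((5 + 3) ** 2 + 6 ** 2) == 10.0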
if __name__ == "__main__":
print(f'''{solution() = }''')
| 74 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for the given checkpoint name."""
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on the config for the small/large/huge variants."""
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"")
def rename_key(name):
    """Map a single key from the original VideoMAE checkpoint onto the Transformers naming scheme."""
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
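# Small illustration (added here, not part of the original script): the rename
# rules above map a checkpoint key onto the Transformers naming scheme, e.g. for
# a patch-embedding weight.
def _demo_rename_key() -> None:
    assert (
        rename_key("patch_embed.proj.weight")
        == "videomae.embeddings.patch_embeddings.projection.weight"
    )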
def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split fused qkv projections into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    """Load the sample eating-spaghetti clip used to sanity-check the conversion."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint into the Transformers format and verify it."""
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 693 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of ``input_string`` across ``key`` zigzag rails."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Recreate the zigzag grid and read the characters back in original order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt under every candidate key and return the results keyed by guess."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
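# Round-trip sketch (added for illustration): encrypting with a key and
# decrypting with the same key recovers the plaintext, and bruteforce() lists
# the decryption under every candidate key.
def _demo_rail_fence() -> None:
    cipher = encrypt("Hello World", 4)
    assert decrypt(cipher, 4) == "Hello World"
    assert bruteforce(cipher)[4] == "Hello World"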
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 693 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
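# Brief illustration (an addition for clarity): with the default scale factor of
# 8, each latent cell covers a 64-pixel tile, so a 768 x 768 canvas maps back to
# 768 x 768 while non-multiples round up to the next full tile.
def _demo_downscale() -> None:
    assert downscale_height_and_width(768, 768, 8) == (768, 768)
    assert downscale_height_and_width(770, 770, 8) == (832, 832)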
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0) -> None:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0) -> None:
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=5_12, width=5_12, num_inference_steps=1_00, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, ) -> Union[ImagePipelineOutput, tuple]:
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 76 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 1_0000) -> int:
    """Sum every i below ``limit`` for which sum_of_divisors(sum_of_divisors(i)) returns to i (amicable members)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
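# Worked example (added for illustration): 220 and 284 form the classic amicable
# pair, because sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so
# both are counted by solution().
def _demo_amicable_pair() -> None:
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220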
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 693 | 0 |
"""simple docstring"""
A = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with an operand stack and an operator stack."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
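# Short trace (added for illustration) of the two-stack rules on a tiny input:
# for "(2 + 3)", '2' and '3' land on the operand stack, '+' on the operator
# stack, and the ')' triggers one reduction, leaving 5 on top.
def _demo_two_stack() -> None:
    assert dijkstras_two_stack_algorithm("(2 + 3)") == 5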
if __name__ == "__main__":
A = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 77 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor."""
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0_8_2_1 * moles)))
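# Hedged usage sketch (an addition; the function names above are descriptive
# restorations, not guaranteed originals): one mole of an ideal gas at 300 K in
# a 10 L vessel exerts about 0.0821 * 300 / 10 = 2.463 atm, which rounds to 2.
def _demo_ideal_gas() -> None:
    assert moles_to_pressure(10, 1, 300) == 2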
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 10_24,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
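# Hedged usage sketch (an addition; it assumes network access to the
# 'RUCAIBox/mvp' checkpoint referenced in the URL maps above):
#
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   batch = tokenizer(["Summarize: MVP is a multi-task pre-trained model."], return_tensors="pt")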
| 78 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self) -> TaConfig:
        return TaConfig.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self) -> tuple:
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self) -> tuple:
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self) -> TaConfig:
        return TaConfig(
            vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self) -> TaConfig:
        return TaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) -> None:
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) -> None:
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
    def create_and_check_model_fp16_forward(self, config, input_dict, ) -> None:
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self) -> None:
        self.model_tester = UMTaModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
    def test_headmasking(self) -> None:
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def test_small_integration_test(self) -> None:
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 693 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''Swap array[index1] and array[index2] when they violate the requested direction (1 = ascending, 0 = descending).'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''Recursively merge a bitonic sequence of ``length`` elements starting at ``low``.'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''Sort ``length`` elements of ``array`` starting at ``low``; ``length`` must be a power of two.'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
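# Note added for clarity: the classic bitonic network assumes the slice length
# is a power of two; a quick sketch on eight elements, sorting ascending.
def _demo_bitonic() -> None:
    data = [12, 42, -21, 17, 23, 18, 9, 0]
    bitonic_sort(data, 0, len(data), 1)
    assert data == sorted([12, 42, -21, 17, 23, 18, 9, 0])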
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 79 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """Parse CLI arguments, load the PyTorch model, and run the conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
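# Example invocation (illustrative only; the script filename and paths are
# assumptions, adjust to your environment):
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints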
if __name__ == "__main__":
main()
| 693 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A weighted, directed edge; weight is restricted to 0 or 1."""
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list used by the 0-1 BFS shortest-path search."""
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size
def _a ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> int | None:
"""simple docstring"""
__lowercase = deque([start_vertex] )
__lowercase = [None] * self.size
__lowercase = 0
while queue:
__lowercase = queue.popleft()
__lowercase = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__lowercase = current_distance + edge.weight
__lowercase = distances[edge.destination_vertex]
if (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
__lowercase = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
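# A minimal usage sketch for the 0-1 BFS above; the graph below is made up for
# illustration. Zero-weight edges are "free", so the shortest 0 -> 4 path
# (0 -> 1 -> 3 -> 4) costs 2 despite traversing three edges.
if __name__ == "__main__":
    g = AdjacencyList(5)
    g.add_edge(0, 1, 0)
    g.add_edge(0, 2, 1)
    g.add_edge(1, 3, 1)
    g.add_edge(2, 3, 0)
    g.add_edge(3, 4, 1)
    assert g.get_shortest_path(0, 4) == 2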
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that pads/chunks raw audio for the EnCodec neural codec."""

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding=None,
        truncation=False,
        max_length=None,
        return_tensors=None,
        sampling_rate=None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
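# Hedged usage sketch (constructor arguments mirror the defaults above; the
# output-shape comment is an assumption based on the transposes in __call__):
#
#   import numpy as np
#   feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#   raw_audio = np.zeros(24_000, dtype=np.float32)  # one second of silence
#   inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
#   # inputs["input_values"]: (batch, channels, time) -> here (1, 1, 24000)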
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
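# Hedged usage sketch: this formatter is what backs `Dataset.set_format("torch")`
# and `Dataset.with_format("torch")` in `datasets`, roughly:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#   ds[0]["x"]  # tensor([1, 2]) with dtype torch.int64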
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
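# Hedged usage sketch (the checkpoint id is an assumption; any UNet2DModel
# trained with the SDE-VE objective should work):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")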
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''longformer'''
def __init__( self : str , _UpperCAmelCase : Union[List[int], int] = 512 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 30522 , _UpperCAmelCase : int = 768 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 3072 , _UpperCAmelCase : str = "gelu" , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1e-12 , _UpperCAmelCase : bool = False , **_UpperCAmelCase : List[str] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = attention_window
UpperCAmelCase_ = sep_token_id
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = onnx_export
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : "PretrainedConfig" , _UpperCAmelCase : str = "default" , _UpperCAmelCase : "List[PatchingSpec]" = None ) -> Any:
'''simple docstring'''
super().__init__(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = True
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
UpperCAmelCase_ = super().outputs
if self.task == "default":
UpperCAmelCase_ = {0: "batch"}
return outputs
@property
def lowercase__ ( self : List[str] ) -> float:
'''simple docstring'''
return 1e-4
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : "PreTrainedTokenizerBase" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super().generate_dummy_inputs(
preprocessor=_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase_ = torch.zeros_like(inputs["input_ids"] )
# make every second token global
UpperCAmelCase_ = 1
return inputs
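# Hedged usage sketch:
#
#   from transformers import LongformerConfig, LongformerModel
#   config = LongformerConfig(attention_window=256)
#   model = LongformerModel(config)  # randomly initialized weights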
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowerCAmelCase__ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowerCAmelCase__ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = VOCAB_FILES_NAMES
snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[str] = PRETRAINED_INIT_CONFIGURATION
snake_case__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ConvBertTokenizer
def __init__( self : Dict , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=True , __lowerCAmelCase : Dict="[UNK]" , __lowerCAmelCase : Any="[SEP]" , __lowerCAmelCase : Dict="[PAD]" , __lowerCAmelCase : Any="[CLS]" , __lowerCAmelCase : Tuple="[MASK]" , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , tokenize_chinese_chars=__lowerCAmelCase , strip_accents=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowerCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowerCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowerCAmelCase ) != tokenize_chinese_chars
):
_lowerCamelCase : int = getattr(__lowerCAmelCase , normalizer_state.pop('''type''' ) )
_lowerCamelCase : Tuple = do_lower_case
_lowerCamelCase : str = strip_accents
_lowerCamelCase : List[str] = tokenize_chinese_chars
_lowerCamelCase : Dict = normalizer_class(**__lowerCAmelCase )
_lowerCamelCase : str = do_lower_case
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=None ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
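# Hedged usage sketch:
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   encoding = tokenizer("Hello world", return_tensors="pt")
#   encoding["input_ids"]  # [CLS] hello world [SEP] as vocabulary ids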
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2) version: scan the remainder of the list for each element."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) version using a monotonic stack, scanning the array right to left."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
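# Worked example: for [2, 1, 5] the next greater elements are [5, 5, -1];
# the stack-based version computes this in a single right-to-left pass.
if __name__ == "__main__":
    assert next_greatest_element([2, 1, 5]) == [5, 5, -1]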
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
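# Note: FileLock truncates over-long lock filenames to at most 255 characters
# (hence the assertions above). Minimal hedged usage sketch:
#
#   lock = FileLock("resource.txt.lock")
#   with lock:
#       ...  # exclusive access while the lock is held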
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
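# Quick hedged usage sketch (the interactive main() above exercises the same API):
#
#   lst = LinkedList()
#   lst.insert_tail(1)
#   lst.insert_tail(2)
#   lst.insert_head(0)
#   str(lst)  # "0->1->2"
#   len(lst)  # 3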
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
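# Hedged usage sketch:
#
#   from transformers import DetaConfig, DetaModel
#   config = DetaConfig(num_queries=300)
#   model = DetaModel(config)  # randomly initialized weights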
import argparse
import json

from tqdm import tqdm


def main(raw_args: str = None):
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args(raw_args)

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
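# Example invocation (output file names are assumptions for illustration):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set dev.questions \
#       --gold_data_path dev.gold_titles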
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the generated triangle perimeters that do not exceed max_perimeter."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
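# Hedged usage sketch:
#
#   from transformers import TableTransformerConfig, TableTransformerModel
#   config = TableTransformerConfig()
#   model = TableTransformerModel(config)  # randomly initialized weights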
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


# one-shot warning flags (names are reconstructed assumptions)
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """Audio feature: decodes audio data into a dict with "path", "array" and "sampling_rate"."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
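# Hedged usage sketch (the file path is an assumption):
#
#   from datasets import Audio, Dataset
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}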
"""simple docstring"""
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
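# For orientation, a stand-alone denoising loop with IPNDMScheduler, mirroring
# full_loop() above; `model` stands in for any residual-predicting callable and
# the step count is an arbitrary example, not something this test pins down:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample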
| 693 | 0 |
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image"], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
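# Illustrative use of the pipeline above; the checkpoint is a stock zero-shot
# object detection model chosen as an example, not pinned down by this file:
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#            candidate_labels=["cat", "remote control"])
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]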
| 89 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
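# The imported bin_exp_mod is not shown in this file. A minimal sketch that is
# consistent with how it is called above (bin_exp_mod(a, n, b) == a**n % b via
# binary exponentiation); the implementation details are an assumption about
# that helper, not its actual source:
def _bin_exp_mod_sketch(a: int, n: int, b: int) -> int:
    res = 1
    a %= b
    while n > 0:
        if n % 2 == 1:  # odd exponent: fold one factor of a into the result
            res = res * a % b
        a = a * a % b  # square the base
        n //= 2  # halve the exponent
    return res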
| 693 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase_ ), BeitForMaskedImageModeling]:
continue
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCAmelCase__ = model(**lowerCamelCase_ ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase__ = False
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase_ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCAmelCase__ = model(**lowerCamelCase_ ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = BeitModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _snake_case ( ) -> Optional[int]:
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
# prepare bool_masked_pos
lowerCAmelCase__ = torch.ones((1, 1_96) , dtype=torch.bool ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(pixel_values=lowerCamelCase_ , bool_masked_pos=lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase_ , atol=1e-2 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
lowerCAmelCase__ = 2_81
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
lowerCAmelCase__ = 23_96
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = BeitImageProcessor(do_resize=lowerCamelCase_ , size=6_40 , do_center_crop=lowerCamelCase_ )
lowerCAmelCase__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase__ = Image.open(ds[0]['''file'''] )
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowerCAmelCase__ = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=lowerCamelCase_ , )
else:
lowerCAmelCase__ = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=lowerCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = BeitImageProcessor(do_resize=lowerCamelCase_ , size=6_40 , do_center_crop=lowerCamelCase_ )
lowerCAmelCase__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase__ = Image.open(ds[0]['''file'''] )
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits.detach().cpu()
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ , target_sizes=[(5_00, 3_00)] )
lowerCAmelCase__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowerCAmelCase__ = torch.Size((1_60, 1_60) )
        self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
| 90 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a (ksize x ksize) Gabor filter kernel."""

    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
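# Parameter intuition for gabor_filter_kernel (standard Gabor-filter facts, not
# claims specific to this repo): sigma sets the Gaussian envelope width, theta
# the stripe orientation in degrees, lambd the sinusoid wavelength, gamma the
# spatial aspect ratio, and psi the phase offset. For example:
#
#   kernel = gabor_filter_kernel(20, 8, 45, 10, 0, 0)  # shape (21, 21) after the odd-size bump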
| 693 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : list ):
if not grid or not grid[0]:
raise TypeError('The grid does not contain the appropriate information' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
A = grid[0]
for row_n in range(1 , len(snake_case__ ) ):
A = grid[row_n]
A = fill_row(snake_case__ , snake_case__ )
A = grid[row_n]
return grid[-1][-1]
def _snake_case ( snake_case__ : list , snake_case__ : list ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(snake_case__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
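# Worked example (the values follow from the two functions above; the grid
# itself is an illustration, not from this file):
#
#   min_path_sum([[1, 3, 1],
#                 [1, 5, 1],
#                 [4, 2, 1]])  # -> 7, via the path 1 -> 3 -> 1 -> 1 -> 1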
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 91 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
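# First few values implied by the recurrence above: sylvester(1) = 2,
# sylvester(2) = 3, sylvester(3) = 7, sylvester(4) = 43; each term is
# lower * upper + 1 with lower = previous - 1 and upper = previous.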
| 693 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
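    # Note: get_expected_values mirrors DETR's shortest-edge resize rule: the
    # short side is scaled to size["shortest_edge"] (18 here) and the long side
    # keeps the aspect ratio, so e.g. a 400x300 (w x h) input resizes to 24x18.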
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''rescale_factor''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''' ) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
lowercase : Optional[Any] =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# Initialize image_processing
lowercase : int =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
lowercase : Tuple =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : List[Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase : List[str] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
lowercase : Union[str, Any] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# Initialize image_processing
lowercase : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowercase : Any =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : List[Any] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Tuple =self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# Initialize image_processing
lowercase : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase : Tuple =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : Dict =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# prepare image and target
lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase : str =json.loads(f.read() )
lowercase : Dict ={'''image_id''': 39769, '''annotations''': target}
# encode them
lowercase : Union[str, Any] =DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
lowercase : Optional[int] =image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
lowercase : int =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__ )
lowercase : List[str] =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase : Any =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__ ) )
# verify boxes
lowercase : Any =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__ )
lowercase : List[str] =torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase : int =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__ ) )
# verify is_crowd
lowercase : Optional[Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__ ) )
# verify class_labels
lowercase : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__ ) )
# verify orig_size
lowercase : Optional[int] =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__ ) )
# verify size
lowercase : Any =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# prepare image, target and masks_path
lowercase : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase : Tuple =json.loads(f.read() )
lowercase : Dict ={'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
lowercase : Any =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase : Optional[Any] =DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
lowercase : List[str] =image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
lowercase : Union[str, Any] =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__ )
lowercase : int =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase : Any =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__ ) )
# verify boxes
lowercase : int =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__ )
lowercase : int =torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase : List[Any] =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__ ) )
# verify is_crowd
lowercase : Optional[int] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__ ) )
# verify class_labels
lowercase : Optional[Any] =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__ ) )
# verify masks
lowercase : str =822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__ )
# verify orig_size
lowercase : Any =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__ ) )
# verify size
lowercase : str =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__ ) )
| 92 |
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
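# The pattern these tests exercise: a CLIP-style model scores the image against
# each candidate label rendered as text, and the pipeline normalizes those
# similarities into per-label scores. Minimal usage, mirroring the slow tests:
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cats.png", candidate_labels=["cat", "plane", "remote"])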
| 693 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
return 1.0 / (1.0 + np.exp(-_outputs ))
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :str = np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE )
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[Any] = """sigmoid"""
__magic_name__ :Tuple = """softmax"""
__magic_name__ :str = """none"""
@add_end_docstrings(
a , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :int = False
__magic_name__ :List[str] = ClassificationFunction.NONE
def __init__( self , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = tokenizer_kwargs
lowerCAmelCase__ :str = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
lowerCAmelCase__ :Dict = self.model.config.return_all_scores
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None:
lowerCAmelCase__ :Optional[Any] = top_k
lowerCAmelCase__ :Optional[Any] = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __UpperCAmelCase , )
if return_all_scores:
lowerCAmelCase__ :Any = None
else:
lowerCAmelCase__ :Any = 1
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Tuple = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowerCAmelCase__ :List[Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
lowerCAmelCase__ :List[str] = 'top_k' not in kwargs
if isinstance(args[0] , __UpperCAmelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.framework
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.model(**__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
lowerCAmelCase__ :Optional[Any] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
lowerCAmelCase__ :List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
lowerCAmelCase__ :List[Any] = self.model.config.function_to_apply
else:
lowerCAmelCase__ :List[Any] = ClassificationFunction.NONE
lowerCAmelCase__ :List[str] = model_outputs['logits'][0]
lowerCAmelCase__ :str = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
lowerCAmelCase__ :List[Any] = sigmoid(__UpperCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
lowerCAmelCase__ :List[str] = softmax(__UpperCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
lowerCAmelCase__ :Optional[Any] = outputs
else:
raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
lowerCAmelCase__ :int = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase )
]
if not _legacy:
dict_scores.sort(key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )
if top_k is not None:
lowerCAmelCase__ :Any = dict_scores[:top_k]
return dict_scores
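# Illustrative usage of the pipeline above; the checkpoint is a stock example,
# not something this file references:
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification",
#                         model="distilbert-base-uncased-finetuned-sst-2-english")
#   classifier("This movie was great!")  # -> [{"label": "POSITIVE", "score": ...}]
#   classifier("This movie was great!", top_k=None)  # scores for every label, sorted descending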
| 93 |
from __future__ import annotations

import math


def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
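# Tree intuition (illustrative, not from this file): with the 8 leaf scores in
# main(), height = log2(8) = 3 and maximizer/minimizer levels alternate; on the
# smaller tree minimax(0, 0, True, [3, 5, 2, 9], 2) this gives
# max(min(3, 5), min(2, 9)) = 3.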
| 693 | 0 |
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
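# Example (illustrative): pigeon_sort([8, 3, 2, 7, 4, 6, 8]) -> [2, 3, 4, 6, 7, 8, 8].
# Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it
# suits inputs whose value range is not much larger than their length.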
| 94 |
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation and join.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
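# Round-trip example (derived from the functions above):
#
#   base16_encode(b"Hello World!")             # -> '48656C6C6F20576F726C6421'
#   base16_decode("48656C6C6F20576F726C6421")  # -> b'Hello World!'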
if __name__ == "__main__":
import doctest
doctest.testmod()
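# Example (illustrative, added):
# >>> base16_encode(b"Hello")
# '48656C6C6F'
# >>> base16_decode("48656C6C6F")
# b'Hello'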
| 693 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
            the maximum acceptable input length for the model if that argument is not provided. This will truncate
            token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
            of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided. This will only truncate the first
            sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided. This will only truncate the
            second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
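# Usage sketch (illustrative, added; mirrors the transformers DPR docs):
#
#   from transformers import DPRReader
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love ?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#
# `encoded_inputs["input_ids"]` has shape (n_passages, sequence_length), as
# described in the docstring above.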
| 95 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of distinct prime factors of num."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Check that ALL elements of an iterable are equal (or that it is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run the elements through our unique_prime_factors function,
        # then append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Return the first of the first n consecutive integers with n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
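# Worked examples (illustrative, added), from the Project Euler 47 statement:
# >>> run(2)
# [14, 15]            # 14 = 2 * 7, 15 = 3 * 5
# >>> run(3)
# [644, 645, 646]     # each has exactly three distinct prime factors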
| 693 | 0 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
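# Note (added): a square spiral of side length j holds 2*j - 1 values on its
# diagonals. The inner loop above visits the three non-square corners of each
# new layer (the fourth corner, (j + 2) ** 2, is a perfect square and never
# prime), so solution(0.1) returns the first side length where primes fall
# below 10% of the diagonal values (Project Euler 58).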
| 96 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for the given checkpoint name."""
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on the config based on the model size in the name."""
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Map an original VideoMAE parameter name onto the HF naming scheme."""
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite the original state dict, splitting fused qkv weights into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    """Load a short sample video (list of frames) for verification."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint into the HF format and verify its outputs."""
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
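# Usage sketch (illustrative, added): with the argparse defaults above, running
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base
# downloads the Google Drive checkpoint, converts it, verifies the logits
# against the hard-coded slices, and saves the HF model (the script file name
# here is hypothetical; --checkpoint_url defaults to the link above).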
| 693 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = '''file'''
@pytest.fixture(scope='''session''' )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT, '''utf-8''' )
    with zstd.open(path, '''wb''' ) as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), '''w''' ) as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''', [True, False] )
@pytest.mark.parametrize('''default_cache_dir''', [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f'''tmp://{tmpfs_file}''' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True )
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True )
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True )
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
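# Note (added): each test above patches datasets.config.HF_DATASETS_OFFLINE to
# True for its duration, so the http/ftp/fsspec helpers are expected to raise
# OfflineModeIsEnabled without ever touching the network.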
| 97 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
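# Migration sketch (illustrative, added): both calls load the same processor,
# but only the second avoids the FutureWarning raised above:
#
#   feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")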
| 693 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``: only the
    main worker loads the index; the other workers send it their queries and
    receive the retrieved documents back.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info('''initializing retrieval''')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
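# Design note (added): retrieval runs only on the rank-0 worker; every other
# rank sends its query vectors via dist.gather and receives its slice of the
# results back through dist.scatter, using a dedicated gloo process group
# because, as the comment in init_retrieval says, nccl lacks gather/scatter
# support while gloo is too slow for the core GPU communication.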
| 98 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below the limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
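# Worked example (illustrative, added): 220 and 284 are amicable, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220 while neither
# equals its own divisor sum, so both are counted; solution(10000) returns
# 31626 (Project Euler 21).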
| 693 | 0 |
import os
# Precompute a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """Count the triangle words in words.txt (Project Euler 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
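# Worked example (illustrative, added): "SKY" scores 19 + 11 + 25 = 55, and 55
# is the 10th triangular number (0.5 * 10 * 11), so "SKY" is a triangle word.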
| 99 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821."""
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P, with R = 0.0821."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR), with R = 0.0821."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
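# Worked example (illustrative, added), matching PV = nRT with R = 0.0821 L*atm/(mol*K):
# >>> moles_to_pressure(volume=0.82, moles=3, temperature=300)
# 90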
| 693 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
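# Note (added): interpolate_pos_encoding=True lets ViT run on images whose
# resolution differs from the pretraining size by resizing the learned position
# embeddings to the new (image_size // patch_size) ** 2 grid, which is exactly
# what the "different size" checks in the model tester above exercise.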
| 100 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9,
        is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8,
        dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0,
        decoder_start_token_id=0, scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
return TaConfig.from_pretrained("google/umt5-base" )
    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None,
        decoder_attention_mask=None, head_mask=None, decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # We need to clamp the input ids here to avoid having pad tokens in between:
        # for NllbMoe the position_ids are prepared such that all pad tokens have
        # pos id = 2 and the rest are between 2..seq_length, where seq_length here
        # is seq_length - num_pad_tokens. When using past there is no way of knowing
        # whether the past input ids had pad tokens in them, which results in an
        # incorrect seq_length and, in turn, position_ids being off by
        # num_pad_tokens in the past input.
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def test_export_to_onnx( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
    def test_model_fp16_forward( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_headmasking( self ) -> Tuple:
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device ),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device ),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device )
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
    def test_disk_offload( self ) -> int:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UmtaIntegrationTest( unittest.TestCase ):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def test_small_integration_test( self ) -> List[Any]:
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False )
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling, EXPECTED_FILLING )
| 693 | 0 |
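# The decoder-cache check above compares a full forward pass against an incremental pass that
# reuses past_key_values. A minimal, hedged sketch of the same invariant outside the test
# harness; the checkpoint name and tolerance are illustrative assumptions, any T5-family
# seq2seq model should behave the same way.
import torch
from transformers import AutoModelForSeq2SeqLM

decoder = AutoModelForSeq2SeqLM.from_pretrained("google/umt5-small").get_decoder().eval()
input_ids = torch.randint(0, decoder.config.vocab_size, (1, 8))
next_token = torch.randint(0, decoder.config.vocab_size, (1, 1))

with torch.no_grad():
    past = decoder(input_ids, use_cache=True).past_key_values
    full = decoder(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state
    incremental = decoder(next_token, past_key_values=past).last_hidden_state

# The last position of the uncached pass should match the single cached step.
assert torch.allclose(full[:, -1], incremental[:, 0], atol=1e-3)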
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest (unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_for_image_classification( self ):
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('nielsr/rvlcdip-demo' )
        image = dataset['train'][0]['image'].convert('RGB' )
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 101 |
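# The DiT test above follows the standard processor -> model -> logits inference pattern.
# A hedged, generic sketch of the same flow; the checkpoint name and placeholder image are
# assumptions for illustration, not part of the original test.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")

image = Image.new("RGB", (224, 224))  # placeholder; use a real document scan in practice
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Map the highest-scoring logit back to a document-class label.
print(model.config.id2label[logits.argmax(-1).item()])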
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model: BertModel , ckpt_dir: str , model_name: str ) -> None:
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name: str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'''bert/{name}'''
    def create_tf_var(tensor: np.ndarray , name: str , session: tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main( raw_args=None ) -> Dict:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 693 | 0 |
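# The converter above relies on two facts: TF variable names derive from the PyTorch keys by
# string substitution, and dense kernels are stored transposed. A small numpy-only sketch of
# that mapping; the sample key and the trimmed rule set are illustrative assumptions.
import numpy as np

tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
var_map = (("layer.", "layer_"), (".", "/"), ("weight", "kernel"))

def to_tf_var_name(name: str) -> str:
    for patt, repl in var_map:
        name = name.replace(patt, repl)
    return f"bert/{name}"

torch_key = "encoder.layer.0.attention.self.query.weight"
tensor = np.zeros((768, 768), dtype=np.float32)
if any(x in torch_key for x in tensors_to_transpose):
    tensor = tensor.T  # TF dense kernels are (in, out); PyTorch stores (out, in)
print(to_tf_var_name(torch_key))  # bert/encoder/layer_0/attention/self/query/kernel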
"""simple docstring"""
def manhattan_distance (point_a , point_b ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point (point ):
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f"""{type(item ).__name__}"""
                    )
                    raise TypeError(msg )
        else:
            msg = f"""Expected a list of numbers as input, found {type(point ).__name__}"""
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner (point_a , point_b ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
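# Quick usage check of the distance helpers above (values computed by hand); assumes the
# functions above are in scope.
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1.5, 2], [3, 0]) == 3.5
try:
    manhattan_distance([1, 1], [2, 2, 2])  # mismatched dimensions
except ValueError as err:
    print(err)  # Both points must be in the same n-dimensional space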
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["""input_values""", """padding_mask"""]
    def __init__( self, feature_size = 1, sampling_rate = 24_000, padding_value = 0.0, chunk_length_s = None, overlap = None, **kwargs, ) -> str:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self, raw_audio, padding = None, truncation = False, max_length = None, return_tensors = None, sampling_rate = None, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one." )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray ):
            raw_audio = np.asarray(raw_audio, dtype=np.float32 )
        elif isinstance(raw_audio, np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
        if padding:
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask" )
        input_values = []
        for example in padded_inputs.pop("input_values" ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 693 | 0 |
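# The chunking arithmetic above picks max_length so that a whole number of overlapping
# windows covers the audio. A small worked example; all numbers are illustrative.
import numpy as np

sampling_rate = 24_000
chunk_length_s, overlap = 1.0, 0.25
chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples per window
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 18000 samples between windows

audio_len = 50_000  # longest example in the batch
nb_step = int(np.ceil(audio_len / chunk_stride))            # 3 windows needed
max_length = (nb_step - 1) * chunk_stride + chunk_length    # 60000: pad-to target
print(chunk_length, chunk_stride, nb_step, max_length)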
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args : Union[str, Any] , **kwargs : Optional[Any] ):
            """simple docstring"""
            pass
def hashimage( image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self : int , model : Optional[Any] , tokenizer : List[Any] , processor : Tuple ):
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self : List[str] , depth_estimator : List[Any] , examples : Optional[int] ):
        """simple docstring"""
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf( self : List[str] ):
        """simple docstring"""
        pass
    @slow
    @require_torch
    def test_large_model_pt( self : List[Any] ):
        """simple docstring"""
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
    @require_torch
    def test_small_model_pt( self : Tuple ):
        """simple docstring"""
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 103 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self, unet, scheduler ) -> Dict:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
    @torch.no_grad()
    def __call__( self, batch_size = 1, num_inference_steps = 2000, generator = None, output_type = "pil", return_dict = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample, sigma_t ).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator ).prev_sample
            # prediction step
            model_output = model(sample, sigma_t ).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1 )
        sample = sample.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 693 | 0 |
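# Hedged usage sketch for the predictor-corrector pipeline above; the checkpoint name is an
# assumption (any score-SDE-VE / NCSN++ checkpoint should work), and this presumes the class
# is the shipped diffusers pipeline importable under this name.
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")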
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson ( function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    """simple docstring"""
    x = symbols(variable )
    func = lambdify(x, function )
    diff_function = lambdify(x, diff(function, x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("Could not find root" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 104 |
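# Quick usage check of the solver above on the classic square-root example; assumes
# newton_raphson from above is in scope.
root = newton_raphson("x**2 - 2", 1.0)
print(round(root, 6))  # 1.414214, i.e. sqrt(2)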
def solution ( n: int = 4_000_000 ) -> int:
    """simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 0 |
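# The loop above scans every Fibonacci number, but only every third one is even, so the
# recurrence E(n) = 4*E(n-1) + E(n-2) visits the even terms directly. A hedged alternative
# sketch; assumes solution() from above is in scope for the cross-check.
def solution_even_only(n: int = 4_000_000) -> int:
    a, b = 2, 8  # first two even Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total

assert solution_even_only() == solution()  # both yield 4613732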
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir( data_dir , save_dir: str , model_name: str , bs: int = 8 , max_source_length: int = 1024 , type_path='val' , n_obs=None , fp16=False , task='summarization' , local_rank=None , num_return_sequences: int = 1 , dataset_kwargs: Dict = None , prefix='' , **generate_kwargs , ) -> Dict:
    """simple docstring"""
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend='nccl' , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F'rank_{local_rank}_output.json' )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop('num_beams' , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , 'prefix' , '' ) or ''
    ds = SeqaSeqDataset(
        data_dir , tokenizer , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch['ids']
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({'pred': pred, 'id': ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate() -> List[Any]:
    """simple docstring"""
    parser = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
    parser.add_argument('--data_dir' , type=str , help='like cnn_dm/test.source' )
    parser.add_argument(
        '--model_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
    parser.add_argument('--save_dir' , type=str , help='where to save' , default='tmp_gen' )
    parser.add_argument('--max_source_length' , type=int , default=None )
    parser.add_argument(
        '--type_path' , type=str , default='test' , help='which subset to evaluate typically train/val/test' )
    parser.add_argument('--task' , type=str , default='summarization' , help='used for task_specific_params + metrics' )
    parser.add_argument('--bs' , type=int , default=8 , required=False , help='batch size' )
    parser.add_argument(
        '--local_rank' , type=int , default=-1 , required=False , help='should be passed by distributed.launch' )
    parser.add_argument(
        '--n_obs' , type=int , default=None , required=False , help='How many observations. Defaults to all.' )
    parser.add_argument(
        '--num_return_sequences' , type=int , default=1 , required=False , help='How many sequences to return' )
    parser.add_argument(
        '--sync_timeout' , type=int , default=600 , required=False , help='How long should master process wait for other processes to finish.' , )
    parser.add_argument('--src_lang' , type=str , default=None , required=False )
    parser.add_argument('--tgt_lang' , type=str , default=None , required=False )
    parser.add_argument(
        '--prefix' , type=str , required=False , default=None , help='will be added to the beginning of src examples' )
    parser.add_argument('--fp16' , action='store_true' )
    parser.add_argument('--debug' , action='store_true' )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(F'parsed the following generate kwargs: {generate_kwargs}' )
    json_save_dir = Path(args.save_dir + '_tmp' )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob('rank_*.json' ) )
    if intermediate_files:
        raise ValueError(F'Found files at {json_save_dir} please move or remove them.' )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['src_lang'] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['tgt_lang'] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('pseudolabel_results.json' )
            print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + '.target' )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds , labels )
        metrics['n_obs'] = len(preds )
        runtime = time.time() - start_time
        metrics['seconds_per_sample'] = round(runtime / metrics['n_obs'] , 4 )
        metrics['n_gpus'] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F'{args.type_path}_{metric_name}.json' )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F'{args.type_path}_generations.txt' ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F'{args.type_path}.target' ) )
    else:
        shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ) -> List:
    """simple docstring"""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x : x["id"] )
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    """simple docstring"""
    start_wait = time.time()
    logger.info('waiting for all nodes to finish' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 105 |
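# The script above leans on a chunks() helper from utils to regroup num_return_sequences
# generations per input. A plausible minimal implementation, assumed rather than copied
# from the shipped utils module.
from typing import Iterator, List, TypeVar

T = TypeVar("T")

def chunks(lst: List[T], n: int) -> Iterator[List[T]]:
    """Yield successive n-sized pieces of lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

assert list(chunks(["a", "b", "c", "d"], 2)) == [["a", "b"], ["c", "d"]]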
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast( arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element( arr: list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_snake_case : Optional[Any] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 693 | 0 |
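# The stack version above is O(n): each element is pushed and popped at most once. A tiny
# trace makes the invariant visible; assumes next_greatest_element from above is in scope.
print(next_greatest_element([2, 7, 3, 5, 4]))  # [7, -1, 5, -1, -1]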
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> Any:
    '''simple docstring'''
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main() | 106 |
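# A hedged dry-run variant of the stale-issue policy above: it reports instead of mutating.
# The repo name is an example; uses only the PyGithub calls already exercised above.
import os
from datetime import datetime as dt
from github import Github

def dry_run(repo_name: str = "huggingface/accelerate") -> None:
    repo = Github(os.environ["GITHUB_TOKEN"]).get_repo(repo_name)
    now = dt.utcnow()
    for issue in repo.get_issues(state="open"):
        stale_for = (now - issue.updated_at).days
        if stale_for > 23:
            print(f"#{issue.number}: inactive for {stale_for} days")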
from typing import Any
class Node :
    def __init__( self, data ) -> Any:
        self.data = data
        self.next = None
    def __repr__( self ) -> str:
        return f'''Node({self.data})'''
class LinkedList :
    def __init__( self ) -> Tuple:
        self.head = None
    def __iter__( self ) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __repr__( self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __getitem__( self, index ) -> Any:
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self, index, data ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self, data ) -> None:
        self.insert_nth(len(self ), data )
    def insert_head( self, data ) -> None:
        self.insert_nth(0, data )
    def insert_nth( self, index, data ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError("list index out of range" )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ) -> None:  # print every node data
        print(self )
    def delete_head( self ) -> Any:
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self, index = 0 ) -> Any:
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError("List index out of range." )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ) -> bool:
        return self.head is None
    def reverse( self ) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112 ),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!" ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> Union[str, Any]:
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head " ).strip() )
    linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
    linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nDelete head" )
    linked_list.delete_head()
    print("Delete tail" )
    linked_list.delete_tail()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nReverse linked list" )
    linked_list.reverse()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nString representation of linked list:" )
    print(linked_list )
    print("\nReading/changing Node data using indexing:" )
    print(f'''Element at Position 1: {linked_list[1]}''' )
    linked_list[1] = input("Enter New Value: " ).strip()
    print("New list:" )
    print(linked_list )
    print(f'''length of linked_list is : {len(linked_list )}''' )
if __name__ == "__main__":
main()
| 693 | 0 |
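# Quick property check for reverse() above: reversing twice is the identity. Assumes the
# LinkedList class above is in scope.
ll = LinkedList()
for value in [3, 1, 4, 1, 5]:
    ll.insert_tail(value)
before = str(ll)
ll.reverse()
ll.reverse()
assert str(ll) == before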
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    return EarlyStopping(
        monitor=F'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class SeqaSeqLoggingCallback( pl.Callback ):
    """simple docstring"""
    def on_batch_end( self, trainer, pl_module ) -> Union[str, Any]:
        lrs = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None:
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file, 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor ):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
    @rank_zero_only
    def on_train_start( self, trainer, pl_module ) -> Union[str, Any]:
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end( self, trainer: pl.Trainer, pl_module: pl.LightningModule ) -> Any:
        save_json(pl_module.metrics, pl_module.metrics_save_path )
        return self._write_logs(trainer, pl_module, 'test' )
    @rank_zero_only
    def on_validation_end( self, trainer: pl.Trainer, pl_module ) -> Optional[Any]:
        save_json(pl_module.metrics, pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 107 |
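# Quick sanity check of the parameter counter above on a toy module; shapes are
# illustrative and assume count_trainable_parameters from above is in scope.
import torch.nn as nn

toy = nn.Linear(10, 4)           # 10*4 weights + 4 biases = 44 parameters
assert count_trainable_parameters(toy) == 44
toy.bias.requires_grad = False   # frozen parameters are excluded from the count
assert count_trainable_parameters(toy) == 40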
import argparse
import json
from tqdm import tqdm
def main() -> Optional[int]:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(contexts ) + "\n" )
if __name__ == "__main__":
main()
| 693 | 0 |
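# The loop above assumes each DPR record carries a question and a list of positive contexts.
# A minimal record showing the two lines the script emits; the field values are made up.
dpr_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "Declaration of Independence"}],
}
eval_line = dpr_record["question"] + "\n"
gold_line = "\t".join(ctx["title"] for ctx in dpr_record["positive_ctxs"]) + "\n"
print(eval_line, gold_line, sep="")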
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext , key ) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext ) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles , common_word ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename = "p059_cipher.txt" ) -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="""utf-8""" )
    ciphertext = [int(number ) for number in data.strip().split(""",""" )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F"{solution() = }") | 108 |
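# The search above works because XOR is self-inverse: applying the same key twice restores
# the plaintext. A two-line check with a throwaway message and key byte.
msg, key = "hello", 107
assert "".join(chr(ord(c) ^ key ^ key) for c in msg) == msg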
def solution( max_perimeter: int = 10**9 ) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 0 |
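# The recurrence above enumerates almost-equilateral triangles (a, a, a+-1) with integer
# area. A hedged brute-force cross-check via Heron's formula over a small bound; the bound
# and loop limits are illustrative, and solution() from above is assumed in scope.
from math import isqrt

def brute(max_perimeter: int) -> int:
    total = 0
    for a in range(1, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            p = 2 * a + b
            if b <= 0 or p > max_perimeter:
                continue
            # Heron: 16*area^2 = p*(p-2a)*(p-2a)*(p-2b); integer area iff sqrt is a
            # positive multiple of 4.
            sq = p * (p - 2 * a) ** 2 * (p - 2 * b)
            r = isqrt(sq)
            if r * r == sq and r % 4 == 0 and r > 0:
                total += p
    return total

assert brute(1000) == solution(1000)  # both give 16 + 50 + 196 + 722 = 984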
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch
            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf( self ):
        '''simple docstring'''
        unmasker = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,top_k=2 ,framework="""tf""" )
        outputs = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
{"""sequence""": """My name is grouped""", """score""": 2.1E-0_5, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-0_5, """token""": 2_5506, """token_str""": """ accuser"""},
] ,)
        outputs = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-0_5,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-0_5,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] ,)
        outputs = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-0_5, """token""": 2941, """token_str""": """ Te"""},
] ,)
    @require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        unmasker = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,top_k=2 ,framework="""pt""" )
        outputs = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
{"""sequence""": """My name is Maul""", """score""": 2.2E-0_5, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS"""},
] ,)
        outputs = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-0_5,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS"""},
] ,)
        outputs = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
{"""sequence""": """My name is Patrick""", """score""": 2.1E-0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-0_5, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 1_3606, """token_str""": """ Clara"""},
] ,)
        outputs = unmasker("""My name is <mask> <mask>""" ,top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ,decimals=6 ) ,[
[
{
"""score""": 2.2E-0_5,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-0_5,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] ,)
    @require_torch_gpu
    def test_fp16_casting( self ):
        '''simple docstring'''
        pipe = pipeline("""fill-mask""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,device=0 ,framework="""pt""" )
        # convert model to fp16
        pipe.model.half()
        response = pipe("""Paris is the [MASK] of France.""" )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response ,list )
    @slow
    @require_torch
    def test_large_model_pt( self ):
        '''simple docstring'''
        unmasker = pipeline(task="""fill-mask""" ,model="""distilroberta-base""" ,top_k=2 ,framework="""pt""" )
        self.run_large_test(unmasker )
    @slow
    @require_tf
    def test_large_model_tf( self ):
        '''simple docstring'''
        unmasker = pipeline(task="""fill-mask""" ,model="""distilroberta-base""" ,top_k=2 ,framework="""tf""" )
        self.run_large_test(unmasker )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,[
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] ,)
__SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,[
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] ,)
__SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,[
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] ,)
@require_torch
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,framework="""pt""" )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.run_pipeline_test(lowerCamelCase ,[] )
@require_tf
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,framework="""tf""" )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.run_pipeline_test(lowerCamelCase ,[] )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token (probably reformer or wav2vec2).""" )
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = fill_masker.tokenizer
__SCREAMING_SNAKE_CASE = fill_masker.model
__SCREAMING_SNAKE_CASE = fill_masker(
f"""This is a {tokenizer.mask_token}""" ,)
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
] ,)
__SCREAMING_SNAKE_CASE = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
] ,)
__SCREAMING_SNAKE_CASE = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
lowerCamelCase ,[
[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
],
[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
],
] ,)
with self.assertRaises(lowerCamelCase ):
fill_masker([None] )
        # Input without a mask_token is not supported
with self.assertRaises(lowerCamelCase ):
fill_masker("""This is""" )
self.run_test_top_k(lowerCamelCase ,lowerCamelCase )
self.run_test_targets(lowerCamelCase ,lowerCamelCase )
self.run_test_top_k_targets(lowerCamelCase ,lowerCamelCase )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCamelCase ,lowerCamelCase )
self.fill_mask_with_multiple_masks(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
__SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:2]
# Pipeline argument
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ,targets=lowerCamelCase )
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
] ,)
__SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} ,set(lowerCamelCase ) )
# Call argument
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=lowerCamelCase )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
] ,)
__SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} ,set(lowerCamelCase ) )
# Score equivalence
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [top_mask["""token_str"""] for top_mask in outputs]
__SCREAMING_SNAKE_CASE = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCamelCase ) == set(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCamelCase ) ,nested_simplify(lowerCamelCase ) )
# Raises with invalid
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=[""""""] )
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets="""""" )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ,top_k=2 )
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
] ,)
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
] ,)
self.assertEqual(nested_simplify(lowerCamelCase ) ,nested_simplify(lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase )
# top_k=2, ntargets=3
__SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3]
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=lowerCamelCase )
        # If we use the most probable targets, and filter differently, we should still
# have the same results
        __SCREAMING_SNAKE_CASE = [el["""token_str"""] for el in sorted(lowerCamelCase ,key=lambda x: x["score"] ,reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCamelCase ).issubset(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=lowerCamelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowerCamelCase ) ,nested_simplify(lowerCamelCase ) )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Dict ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
# String duplicates + id duplicates
__SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3]
__SCREAMING_SNAKE_CASE = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__SCREAMING_SNAKE_CASE = fill_masker(f"""My name is {tokenizer.mask_token}""" ,targets=lowerCamelCase ,top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowerCamelCase ) ,3 )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
lowerCamelCase ,[
[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
],
[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
],
[
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
{"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )},
],
] ,)
| 109 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class Audio :
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    _type: str = field(default="""Audio""" , init=False , repr=False )
def __call__( self ) -> Optional[int]:
return self.pa_type
    def encode_example( self, value ) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
        if isinstance(value, str ):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm" ):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate" ) is None:
                    # at least a sampling rate is needed to convert raw PCM bytes to WAV bytes
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
                if value.get("bytes" ):
                    # if we already have the PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.float32 ) / 32767
                buffer = BytesIO(bytes() )
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav" )
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self, value, token_per_repo_id = None ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
        path , file = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::" )[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL )["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage( self, storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ), type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ), type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ), type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
        return array_cast(storage, self.pa_type )
    def embed_storage( self, storage ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path, "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type )
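# Illustrative usage sketch (added for clarity, not part of the original source). This class
# corresponds to the `Audio` feature type in `datasets`; with the repaired method names above,
# a typical encode/decode round trip looks like the following ("example.wav" is a hypothetical
# placeholder path):
#
#   feature = Audio(sampling_rate=16_000)
#   encoded = feature.encode_example("example.wav")  # -> {"bytes": None, "path": "example.wav"}
#   decoded = feature.decode_example(encoded)        # -> {"path": ..., "array": ..., "sampling_rate": 16_000}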
| 693 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[str] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Dict =self.dummy_uncond_unet
lowercase : List[str] =ScoreSdeVeScheduler()
lowercase : Optional[int] =ScoreSdeVePipeline(unet=_a , scheduler=_a )
sde_ve.to(_a )
sde_ve.set_progress_bar_config(disable=_a )
lowercase : Union[str, Any] =torch.manual_seed(0 )
lowercase : List[Any] =sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_a ).images
lowercase : Any =torch.manual_seed(0 )
lowercase : Union[str, Any] =sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_a , return_dict=_a )[
0
]
lowercase : Union[str, Any] =image[0, -3:, -3:, -1]
lowercase : str =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Optional[int] =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] ='''google/ncsnpp-church-256'''
lowercase : Tuple =UNetaDModel.from_pretrained(_a )
lowercase : int =ScoreSdeVeScheduler.from_pretrained(_a )
lowercase : Optional[int] =ScoreSdeVePipeline(unet=_a , scheduler=_a )
sde_ve.to(_a )
sde_ve.set_progress_bar_config(disable=_a )
lowercase : List[str] =torch.manual_seed(0 )
lowercase : Optional[int] =sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_a ).images
lowercase : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase : Optional[int] =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 92 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,)
SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),)
def __lowerCAmelCase ( self, **_a ) -> str:
__SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00}
config.update(**_a )
return config
def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self, _a=0, **_a ) -> int:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self, **_a ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_a, "set_timesteps" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ):
__SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __lowerCAmelCase ( self ) -> str:
for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=t )
def __lowerCAmelCase ( self ) -> Any:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 693 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class lowercase_ ( TaskTemplate ):
    task: str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    audio_column: str = """audio"""
    label_column: str = """labels"""
    def align_with_features( self , features ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
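# Illustrative sketch (added; not in the original source). Assuming this dataclass mirrors
# datasets' AudioClassification task template (upstream class name used below), aligning it
# with a dataset's features swaps the generic ClassLabel in `label_schema` for the dataset's own:
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["negative", "positive"])})
#   task = AudioClassification(label_column="labels").align_with_features(features)
#   task.label_schema["labels"].names  # -> ["negative", "positive"]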
| 660 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n :int , prec :int=1000 ) -> bool:
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
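# Illustrative behaviour (added note, not executed here): with the repaired helper above,
#   is_prime_big(2) -> True, is_prime_big(15) -> False, is_prime_big(97) -> True
# The test is probabilistic: each of the `prec` random bases can wrongly pass a composite
# with probability at most about 1/4, so the overall error rate is roughly 4**(-prec).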
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 693 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def __A ( x , mu = 0.0 , sigma = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
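# Illustrative values (added note): for the standard normal case mu = 0, sigma = 1, the density
# above gives __A(0) = 1 / sqrt(2 * pi) ~ 0.3989 and __A(1) ~ 0.2420, matching the N(0, 1) pdf.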
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize :int , sigma :int , theta :int , lambd :int , gamma :int , psi :int ) -> np.ndarray:
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
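# Illustrative usage sketch (added; parameter values are arbitrary examples): an odd size is
# returned unchanged, while an even size is bumped up by one:
#   gabor_filter_kernel(9, sigma=8, theta=45, lambd=10, gamma=0, psi=0).shape   # -> (9, 9)
#   gabor_filter_kernel(10, sigma=8, theta=45, lambd=10, gamma=0, psi=0).shape  # -> (11, 11)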
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn the image into gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 | 0 |
import os
from pathlib import Path
def load_cuda_kernels( ):
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu' ),
        ]
    ]
    load(
        'MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root )], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ], )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
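# Usage sketch (added; hedged): calling the helper JIT-compiles the C++/CUDA sources on first
# use and returns the compiled extension, e.g.
#   MSDA = load_cuda_kernels()
# after which the deformable-attention ops exposed by the upstream kernels (named
# ms_deform_attn_forward / ms_deform_attn_backward there) should be available on MSDA.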
| 101 |
def sylvester( number :int ) -> int:
    """simple docstring"""
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
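# Illustrative values (added note): sylvester(1) = 2, sylvester(2) = 3, sylvester(3) = 7,
# sylvester(4) = 43; each term equals the product of all previous terms plus one.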
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ ( ProcessorMixin ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE : Optional[int] = "ChineseCLIPImageProcessor"
SCREAMING_SNAKE_CASE : int = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Union[str, Any] , _UpperCamelCase : List[str]=None , _UpperCamelCase : int=None , **_UpperCamelCase : List[Any] ) ->List[Any]:
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case_ = kwargs.pop('''feature_extractor''' )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case_ = self.image_processor
def __call__( self : str , _UpperCamelCase : int=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : int=None , **_UpperCamelCase : Any ) ->int:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
snake_case_ = self.tokenizer(_a , return_tensors=_a , **_a )
if images is not None:
snake_case_ = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
snake_case_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def snake_case__( self : int , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Tuple ) ->str:
return self.tokenizer.batch_decode(*_a , **_a )
def snake_case__( self : Any , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Dict ) ->Dict:
return self.tokenizer.decode(*_a , **_a )
@property
def snake_case__( self : int ) ->Union[str, Any]:
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case__( self : int ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
        return self.image_processor_class
| 39 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 693 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    def __init__( self ,degree ,coefficients) -> None:
        """simple docstring"""
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        self.coefficients = list(coefficients)
        self.degree = degree
    def __add__( self ,polynomial_a) -> Polynomial:
        """simple docstring"""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree ,coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree ,coefficients)
    def __sub__( self ,polynomial_a) -> Polynomial:
"""simple docstring"""
return self + polynomial_a * Polynomial(0 ,[-1])
def __neg__( self : Optional[Any]) -> Polynomial:
"""simple docstring"""
return Polynomial(self.degree ,[-c for c in self.coefficients])
    def __mul__( self ,polynomial_a) -> Polynomial:
        """simple docstring"""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree ,coefficients)
    def evaluate( self ,substitution) -> int | float:
        """simple docstring"""
        result = 0
        for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Dict) -> str:
"""simple docstring"""
        polynomial = ""
        for i in range(self.degree ,-1 ,-1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(_a)
return polynomial
def __repr__( self : Optional[int]) -> str:
"""simple docstring"""
return self.__str__()
    def derivative( self) -> Polynomial:
        """simple docstring"""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 ,coefficients)
    def integral( self ,constant = 0) -> Polynomial:
        """simple docstring"""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 ,coefficients)
    def __eq__( self ,polynomial_a) -> bool:
        """simple docstring"""
        if not isinstance(polynomial_a ,Polynomial):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
    def __ne__( self ,polynomial_a) -> bool:
        """simple docstring"""
        return not self.__eq__(polynomial_a)
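# Illustrative usage sketch (added; not from the source): Polynomial(2, [1, 0, 3]) stores the
# coefficients by ascending power, so it represents 3x^2 + 1. Evaluating it at x = 2 yields
# 3 * 4 + 1 = 13, and its string form renders as "3x^2 + 1":
#
#   p = Polynomial(2, [1, 0, 3])
#   p.evaluate(2)  # -> 13
#   str(p)         # -> "3x^2 + 1"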
| 227 |
from __future__ import annotations
import math
def minimax( depth :int , node_index :int , is_max :bool , scores :list[int] , height :float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
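# Illustrative trace (added note): for scores = [3, 5, 2, 9] and height = 2, the maximizing
# root takes max(min(3, 5), min(2, 9)) = max(3, 2) = 3.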
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[int] = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    _lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
def base16_encode( data :bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data :str ) -> bytes:
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
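# Illustrative round trip (added note): base16_encode(b"Hello") returns "48656C6C6F", and
# base16_decode("48656C6C6F") returns b"Hello".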
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a ( PretrainedConfig ):
"""simple docstring"""
__UpperCAmelCase = """wavlm"""
def __init__( self : str , snake_case_ : Tuple=3_2 , snake_case_ : Optional[int]=7_6_8 , snake_case_ : str=1_2 , snake_case_ : List[str]=1_2 , snake_case_ : Any=3_0_7_2 , snake_case_ : int="gelu" , snake_case_ : Any=0.1 , snake_case_ : Dict=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : Tuple=0.0 , snake_case_ : Optional[int]=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Any=0.0_2 , snake_case_ : Optional[int]=1e-5 , snake_case_ : List[Any]="group" , snake_case_ : Optional[Any]="gelu" , snake_case_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_ : List[Any]=(5, 2, 2, 2, 2, 2, 2) , snake_case_ : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_ : Union[str, Any]=False , snake_case_ : Optional[int]=1_2_8 , snake_case_ : Optional[Any]=1_6 , snake_case_ : Union[str, Any]=3_2_0 , snake_case_ : Dict=8_0_0 , snake_case_ : Union[str, Any]=False , snake_case_ : List[str]=True , snake_case_ : Tuple=0.0_5 , snake_case_ : Optional[int]=1_0 , snake_case_ : str=2 , snake_case_ : Tuple=0.0 , snake_case_ : Union[str, Any]=1_0 , snake_case_ : Any=3_2_0 , snake_case_ : List[str]=2 , snake_case_ : str=0.1 , snake_case_ : Dict=1_0_0 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Union[str, Any]=2_5_6 , snake_case_ : Any=0.1 , snake_case_ : Dict="mean" , snake_case_ : List[str]=False , snake_case_ : Optional[Any]=False , snake_case_ : List[Any]=2_5_6 , snake_case_ : List[str]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_ : int=(5, 3, 3, 1, 1) , snake_case_ : Optional[Any]=(1, 2, 3, 1, 1) , snake_case_ : Optional[Any]=5_1_2 , snake_case_ : Dict=8_0 , snake_case_ : List[Any]=0 , snake_case_ : int=1 , snake_case_ : Optional[int]=2 , snake_case_ : Tuple=False , snake_case_ : Tuple=3 , snake_case_ : Tuple=2 , snake_case_ : List[Any]=3 , snake_case_ : int=None , **snake_case_ : Optional[int] , ):
'''simple docstring'''
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : List[Any] = feat_extract_norm
snake_case__ : Optional[Any] = feat_extract_activation
snake_case__ : Tuple = list(_a )
snake_case__ : Tuple = list(_a )
snake_case__ : int = list(_a )
snake_case__ : List[Any] = conv_bias
snake_case__ : int = num_buckets
snake_case__ : Union[str, Any] = max_bucket_distance
snake_case__ : Optional[int] = num_conv_pos_embeddings
snake_case__ : str = num_conv_pos_embedding_groups
snake_case__ : Any = len(self.conv_dim )
snake_case__ : int = num_hidden_layers
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : str = num_attention_heads
snake_case__ : List[str] = hidden_dropout
snake_case__ : Optional[Any] = attention_dropout
snake_case__ : List[str] = activation_dropout
snake_case__ : Tuple = feat_proj_dropout
snake_case__ : int = final_dropout
snake_case__ : Any = layerdrop
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = initializer_range
snake_case__ : int = num_ctc_classes
snake_case__ : List[str] = vocab_size
snake_case__ : List[str] = do_stable_layer_norm
snake_case__ : Dict = use_weighted_layer_sum
snake_case__ : Tuple = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ : Optional[Any] = apply_spec_augment
snake_case__ : int = mask_time_prob
snake_case__ : List[str] = mask_time_length
snake_case__ : Any = mask_time_min_masks
snake_case__ : Any = mask_feature_prob
snake_case__ : Any = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case__ : Any = num_codevectors_per_group
snake_case__ : Optional[Any] = num_codevector_groups
snake_case__ : Dict = contrastive_logits_temperature
snake_case__ : int = num_negatives
snake_case__ : Dict = codevector_dim
snake_case__ : Union[str, Any] = proj_codevector_dim
snake_case__ : Union[str, Any] = diversity_loss_weight
# ctc loss
snake_case__ : str = ctc_loss_reduction
snake_case__ : Any = ctc_zero_infinity
# adapter
snake_case__ : List[str] = add_adapter
snake_case__ : List[str] = adapter_kernel_size
snake_case__ : str = adapter_stride
snake_case__ : Tuple = num_adapter_layers
snake_case__ : Tuple = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ : Dict = list(_a )
snake_case__ : str = list(_a )
snake_case__ : Optional[Any] = list(_a )
snake_case__ : int = xvector_output_dim
@property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 347 |
from functools import lru_cache
def unique_prime_factors( n :int ) -> set:
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num :int ) -> int:
    """simple docstring"""
    return len(unique_prime_factors(num ) )
def equality( iterable :list ) -> bool:
    """simple docstring"""
    return len(set(iterable ) ) in (0, 1)
def run( n :int ) -> list:
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n :int = 4 ) -> int:
    """simple docstring"""
    results = run(n )
    return results[0] if len(results ) else None
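# Illustrative values (added note): run(2) returns [14, 15], the first pair of consecutive
# integers with two distinct prime factors each (2*7 and 3*5), so solution(2) == 14; the
# known Project Euler 47 answers continue with solution(3) == 644.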
if __name__ == "__main__":
print(solution())
| 693 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed, DeepSpeed, or compiled containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, TPU- and multi-process-aware (only the main process writes)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set (upper-cased) environment variables inside the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` (in place) and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
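# Hedged usage sketch (the two snippets below are my illustrations, not part
# of the original module):
#
#   with patch_environment(cuda_visible_devices="0"):
#       assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"
#
#   wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
#   assert isinstance(extract_model_from_parallel(wrapped), torch.nn.Linear)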
| 447 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
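# Hedged CLI sketch (the script filename is illustrative; the flags come from
# the argparse setup above, and --checkpoint_url defaults to the Google Drive
# link for videomae-base):
#
#   python convert_videomae_to_pytorch.py \
#       --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base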
| 693 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for `accelerate test`."""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Launch the bundled test script through accelerate-launch."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
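# Hedged CLI sketch: this module backs the `accelerate test` subcommand, e.g.
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml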
| 556 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 693 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\n Text data.\n Second line of data."
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


# NOTE: gz_file, xz_file and text_file are fixtures expected from the test
# suite's conftest; they are not defined in this module.
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 208 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of n (all divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 1_0000) -> int:
    """Project Euler 21: sum of all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
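# Known check (Project Euler 21): solution(10000) == 31626, the sum of the
# amicable pairs (220, 284), (1184, 1210), (2620, 2924), (5020, 5564) and
# (6232, 6368).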
| 693 | 0 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel state dict as an original-TF BERT checkpoint."""
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')

    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))


def main(raw_args=None) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
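# Hedged CLI sketch (the script filename is illustrative; the flags come from
# the argparse setup above):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt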
| 92 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K))."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0_8_2_1 * moles)))
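# Worked checks (values follow directly from the formulas above):
#   molarity_to_normality(2, 3.1, 0.31) == 20
#   moles_to_pressure(0.82, 3, 300) == 90                 # P = nRT / V
#   moles_to_volume(0.82, 3, 300) == 90                   # V = nRT / P
#   pressure_and_volume_to_temperature(0.82, 1, 2) == 20  # T = PV / nR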
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
a_ = ["""pixel_values"""]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 2_5_5 , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
super().__init__(**_a )
UpperCAmelCase_ = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
UpperCAmelCase_ = get_size_dict(_a , default_to_square=_a )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ = do_convert_rgb
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ = get_size_dict(_a , default_to_square=_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
UpperCAmelCase_ = (size["height"], size["width"])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> List[str]:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ) -> PIL.Image.Image:
"""simple docstring"""
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_a , default_to_square=_a )
UpperCAmelCase_ = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_a ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_a , _a ) for image in images]
UpperCAmelCase_ = BatchFeature(data={"pixel_values": images} , tensor_type=_a )
return encoded_outputs
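# Hedged usage sketch (assumes the class above corresponds to transformers'
# BlipImageProcessor; the input image is illustrative):
#
#   from PIL import Image
#   processor = BlipImageProcessor()
#   inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 384, 384])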
| 660 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = encoder_seq_length
__SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
__SCREAMING_SNAKE_CASE = self.decoder_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = decoder_layers
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig.from_pretrained("google/umt5-base" )
def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
if attention_mask is None:
__SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = config.num_attention_heads
__SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
return config, input_dict
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
__SCREAMING_SNAKE_CASE = result.last_hidden_state
__SCREAMING_SNAKE_CASE = result.past_key_values
__SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
__SCREAMING_SNAKE_CASE = model(_a )
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )
def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
__SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ =[0.8, 0.9]
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = config_and_inputs[0]
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
__SCREAMING_SNAKE_CASE = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
}
for attn_name, (name, mask) in zip(_a, head_masking.items() ):
__SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_heads, device=_a )
__SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, )
# We check the state of decoder_attentions and cross_attentions just from the last step
__SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
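# Hedged run sketch (the test-file path is an assumption based on the usual
# transformers repository layout):
#
#   pytest tests/models/umt5/test_modeling_umt5.py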
| 693 | 0 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 414 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel state dict as an original-TF BERT checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 693 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase__ : List[Any] =False, False, False
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase = None
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = None
# Automatically constructed
_UpperCAmelCase = """dict"""
_UpperCAmelCase = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
_UpperCAmelCase = field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
SCREAMING_SNAKE_CASE_ : List[str] = BytesIO()
sf.write(_a , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
SCREAMING_SNAKE_CASE_ : Dict = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
SCREAMING_SNAKE_CASE_ : Dict = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 3_2_7_6_7
SCREAMING_SNAKE_CASE_ : Any = BytesIO(bytes() )
sf.write(_a , _a , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
SCREAMING_SNAKE_CASE_ : int = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ' )
if file is None:
SCREAMING_SNAKE_CASE_ : Tuple = token_per_repo_id or {}
SCREAMING_SNAKE_CASE_ : Dict = path.split('::' )[-1]
try:
SCREAMING_SNAKE_CASE_ : Dict = string_to_dict(_a , config.HUB_DATASETS_URL )['repo_id']
SCREAMING_SNAKE_CASE_ : List[str] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
SCREAMING_SNAKE_CASE_ : Dict = None
with xopen(_a , 'rb' , use_auth_token=_a ) as f:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = sf.read(_a )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = sf.read(_a )
SCREAMING_SNAKE_CASE_ : int = array.T
if self.mono:
SCREAMING_SNAKE_CASE_ : str = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
SCREAMING_SNAKE_CASE_ : int = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
SCREAMING_SNAKE_CASE_ : Any = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase__ ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array([None] * len(_a ) , type=pa.binary() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE_ : Dict = pa.array([None] * len(_a ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
SCREAMING_SNAKE_CASE_ : int = storage.field('bytes' )
else:
SCREAMING_SNAKE_CASE_ : Any = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = storage.field('path' )
else:
SCREAMING_SNAKE_CASE_ : Dict = pa.array([None] * len(_a ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase__ ):
with xopen(_a , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.read()
return bytes_
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ : Dict = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
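# The cast_storage/embed_storage pair above converges every Arrow layout onto
# one struct<bytes: binary, path: string>. A minimal pyarrow sketch of that
# target layout (the bytes and paths here are placeholders):
import pyarrow as pa

bytes_array = pa.array([None, b"RIFF..."], type=pa.binary())
path_array = pa.array(["audio/sample1.wav", None], type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"])

print(storage.type)         # struct<bytes: binary, path: string>
print(storage.to_pylist())  # [{'bytes': None, 'path': 'audio/sample1.wav'}, {...}]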
| 101 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""]
def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str:
super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a )
__SCREAMING_SNAKE_CASE = chunk_length_s
__SCREAMING_SNAKE_CASE = overlap
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = bool(
isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_a, np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa )
elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(_a ).T]
# verify inputs are valid
for idx, example in enumerate(_a ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE = "max_length"
else:
__SCREAMING_SNAKE_CASE = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE = self.pad(
_a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, )
if padding:
__SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" )
__SCREAMING_SNAKE_CASE = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a )
return padded_inputs
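# A hedged usage sketch for the extractor above. In transformers this class is
# EncodecFeatureExtractor (the name is elided in the snippet); the defaults are
# mono audio (feature_size=1) at 24 kHz, and a channel axis is added on output.
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
waveform = np.random.randn(24_000).astype(np.float32)  # one second of fake mono audio

inputs = extractor(waveform, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch=1, channels=1, samples)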
| 693 | 0 |
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 39 |
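# A standalone sanity check for the weight-tying helper in the script above
# (a sketch with a toy embedding; no fairseq checkpoint needed). Assigning
# `.data` deliberately bypasses nn.Linear's declared shape, so the head shares
# the embedding matrix and F.linear yields per-token logits.
import torch
from torch import nn

emb = nn.Embedding(10, 4)          # vocab_size=10, emb_size=4
head = make_linear_from_emb(emb)   # the function defined above

hidden = torch.randn(4)
assert torch.allclose(head(hidden), emb.weight @ hidden)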
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
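# A hedged usage sketch for the pipeline above. The checkpoint id is an
# assumption (Google's NCSN++ SDE-VE checkpoints on the Hub follow this
# naming); swap in any ScoreSdeVePipeline-compatible repo.
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256").to("cuda")
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=2000, generator=generator).images[0]
image.save("sde_ve_sample.png")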
| 693 | 0 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
UpperCamelCase__ = logging.getLogger(__name__)
UpperCamelCase__ = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class a__ ( __SCREAMING_SNAKE_CASE ):
snake_case__ = '''bertabs'''
def __init__( self : int ,a__ : int=3_0522 ,a__ : Any=512 ,a__ : Optional[int]=6 ,a__ : Tuple=512 ,a__ : int=8 ,a__ : Optional[Any]=512 ,a__ : Optional[int]=0.2 ,a__ : Any=6 ,a__ : str=768 ,a__ : Optional[Any]=8 ,a__ : str=2048 ,a__ : Dict=0.2 ,**a__ : List[str] ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_a)
_lowerCAmelCase:Union[str, Any] = vocab_size
_lowerCAmelCase:Any = max_pos
_lowerCAmelCase:List[str] = enc_layers
_lowerCAmelCase:Optional[Any] = enc_hidden_size
_lowerCAmelCase:List[Any] = enc_heads
_lowerCAmelCase:Tuple = enc_ff_size
_lowerCAmelCase:Dict = enc_dropout
_lowerCAmelCase:Optional[int] = dec_layers
_lowerCAmelCase:Dict = dec_hidden_size
_lowerCAmelCase:Tuple = dec_heads
_lowerCAmelCase:List[Any] = dec_ff_size
_lowerCAmelCase:List[str] = dec_dropout
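# Quick round-trip sketch (standard PretrainedConfig behavior, nothing
# BertAbs-specific): construct, serialize to a directory, reload, compare.
config = BertAbsConfig(enc_layers=4, dec_layers=4)
print(config.enc_layers, config.dec_hidden_size)  # 4 768

config.save_pretrained("bertabs-config")
reloaded = BertAbsConfig.from_pretrained("bertabs-config")
assert reloaded.to_dict() == config.to_dict()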
| 227 |
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
| 693 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class lowercase ( __SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCAmelCase : str = 'detr'
UpperCAmelCase : Optional[int] = ['past_key_values']
UpperCAmelCase : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , snake_case : Dict=True , snake_case : Optional[Any]=None , snake_case : str=3 , snake_case : Union[str, Any]=100 , snake_case : Any=6 , snake_case : str=2048 , snake_case : List[Any]=8 , snake_case : str=6 , snake_case : Optional[Any]=2048 , snake_case : Dict=8 , snake_case : List[Any]=0.0 , snake_case : Tuple=0.0 , snake_case : List[Any]=True , snake_case : Union[str, Any]="relu" , snake_case : str=256 , snake_case : Optional[Any]=0.1 , snake_case : Union[str, Any]=0.0 , snake_case : int=0.0 , snake_case : int=0.02 , snake_case : Tuple=1.0 , snake_case : Any=False , snake_case : Optional[int]="sine" , snake_case : Any="resnet50" , snake_case : Dict=True , snake_case : Union[str, Any]=False , snake_case : Union[str, Any]=1 , snake_case : Union[str, Any]=5 , snake_case : List[Any]=2 , snake_case : Optional[int]=1 , snake_case : Any=1 , snake_case : Union[str, Any]=5 , snake_case : Optional[int]=2 , snake_case : Dict=0.1 , **snake_case : Optional[int] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_a , _a ):
SCREAMING_SNAKE_CASE : int = backbone_config.get('model_type' )
SCREAMING_SNAKE_CASE : Any = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : List[Any] = config_class.from_dict(_a )
# set timm attributes to None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = None, None, None
SCREAMING_SNAKE_CASE : int = use_timm_backbone
SCREAMING_SNAKE_CASE : List[Any] = backbone_config
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : int = d_model
SCREAMING_SNAKE_CASE : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Any = init_std
SCREAMING_SNAKE_CASE : Union[str, Any] = init_xavier_std
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : List[str] = decoder_layerdrop
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Any = auxiliary_loss
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = backbone
SCREAMING_SNAKE_CASE : Tuple = use_pretrained_backbone
SCREAMING_SNAKE_CASE : int = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE : Any = class_cost
SCREAMING_SNAKE_CASE : int = bbox_cost
SCREAMING_SNAKE_CASE : Optional[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Optional[Any] = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Any = dice_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : int = giou_loss_coefficient
SCREAMING_SNAKE_CASE : str = eos_coefficient
super().__init__(is_encoder_decoder=_a , **_a )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.d_model
@classmethod
def lowerCamelCase_ ( cls : str , snake_case : int , **snake_case : Tuple ):
'''simple docstring'''
return cls(backbone_config=_a , **_a )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type
return output
class lowercase ( __SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCAmelCase : List[str] = version.parse('1.11')
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 1E-5
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        return 12
| 352 |
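# A small usage sketch for the DETR config above (public transformers API):
# kwargs map one-to-one onto the constructor, and attribute_map aliases the
# generic names onto the DETR-specific ones.
from transformers import DetrConfig

config = DetrConfig(encoder_layers=2, decoder_layers=2, num_queries=25)
print(config.d_model, config.num_queries)  # 256 25
print(config.num_attention_heads)          # 8, aliased to encoder_attention_heads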
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Find the next greater element for each entry by scanning everything to its right: O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """The same O(n^2) scan written with enumerate and slicing.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution: each element is pushed and popped at most once, so O(n).

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
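# Why the stack version is linear: the inner while only pops, and every value
# is pushed exactly once, so total pops across the whole run are bounded by n.
# A tiny sanity check on a fresh input:
small = [4, 5, 2, 25]
assert next_greatest_element(small) == [5, 25, 25, -1]
assert next_greatest_element_slow(small) == next_greatest_element(small)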
| 693 | 0 |
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    """Sort the first ``n`` elements of ``collection`` in place, recursively."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Swap ``collection[index - 1]`` rightwards until the prefix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
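# Quick check against the built-in sort (kept small on purpose: each swap in
# insert_next adds a recursion frame, so long lists would hit Python's
# recursion limit):
import random

data = [random.randint(-100, 100) for _ in range(50)]
expected = sorted(data)
rec_insertion_sort(data, len(data))
assert data == expected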
| 347 |
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> Any:
__SCREAMING_SNAKE_CASE = data
__SCREAMING_SNAKE_CASE = None
def __repr__( self ) -> str:
return f'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
def __init__( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = None
def __iter__( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(_a ) for item in self] )
def __getitem__( self, _a ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, _a, _a ) -> None:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
__SCREAMING_SNAKE_CASE = self.head
for _ in range(_a ):
__SCREAMING_SNAKE_CASE = current.next
__SCREAMING_SNAKE_CASE = data
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(len(self ), _a )
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(0, _a )
def __lowerCAmelCase ( self, _a, _a ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
__SCREAMING_SNAKE_CASE = Node(_a )
if self.head is None:
__SCREAMING_SNAKE_CASE = new_node
elif index == 0:
__SCREAMING_SNAKE_CASE = self.head # link new_node to head
__SCREAMING_SNAKE_CASE = new_node
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = new_node
def __lowerCAmelCase ( self ) -> None: # print every node data
print(self )
def __lowerCAmelCase ( self ) -> Any:
return self.delete_nth(0 )
def __lowerCAmelCase ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self, _a = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
__SCREAMING_SNAKE_CASE = self.head # default first node
if index == 0:
__SCREAMING_SNAKE_CASE = self.head.next
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self ) -> bool:
return self.head is None
def __lowerCAmelCase ( self ) -> None:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.head
while current:
# Store the current node's next node.
__SCREAMING_SNAKE_CASE = current.next
# Make the current node's next point backwards
__SCREAMING_SNAKE_CASE = prev
# Make the previous node be the current node
__SCREAMING_SNAKE_CASE = current
# Make the current node the next node (to progress iteration)
__SCREAMING_SNAKE_CASE = next_node
# Return prev in order to put the head at the end
__SCREAMING_SNAKE_CASE = prev
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__SCREAMING_SNAKE_CASE = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__SCREAMING_SNAKE_CASE = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__SCREAMING_SNAKE_CASE = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__SCREAMING_SNAKE_CASE = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
from doctest import testmod
testmod()
__SCREAMING_SNAKE_CASE = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
__SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip()
print("New list:" )
print(__snake_case )
print(f'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
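# The reverse() method above is the classic three-pointer in-place reversal.
# A minimal standalone sketch of the same idea (independent of the class
# above, using a hypothetical _Node helper):
class _Node:
    def __init__(self, data, next=None):
        self.data, self.next = data, next


def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point the current node backwards
        prev = current            # prev trails one step behind
        current = next_node       # advance
    return prev                   # the old tail is the new head


head = _Node(1, _Node(2, _Node(3)))
head = reverse(head)
out = []
while head:
    out.append(head.data)
    head = head.next
assert out == [3, 2, 1]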
| 693 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a : Optional[Any] = logging.get_logger(__name__)
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase : str = ["pixel_values"]
def __init__( self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = None , a_ = True , a_ = 1 / 2_5_5 , a_ = True , a_ = None , a_ = None , **a_ , ) -> None:
"""simple docstring"""
super().__init__(**_a )
UpperCAmelCase = size if size is not None else {'shortest_edge': 2_5_6}
UpperCAmelCase = get_size_dict(_a , default_to_square=_a )
UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCAmelCase = get_size_dict(_a )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self , a_ , a_ , a_ = PILImageResampling.BICUBIC , a_ = None , **a_ , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase = get_resize_output_image_size(_a , size=size['shortest_edge'] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def snake_case_ ( self , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase = get_size_dict(_a )
return center_crop(_a , size=(size['height'], size['width']) , data_format=_a , **_a )
def snake_case_ ( self , a_ , a_ , a_ = None , **a_ ) -> np.ndarray:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def snake_case_ ( self , a_ , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def snake_case_ ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(_a , default_to_square=_a )
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(_a )
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(_a ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(_a , _a ) for image in images]
UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=_a , tensor_type=_a )
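# The preprocess() pipeline above ends with rescale -> normalize -> channels
# first. A plain-numpy sketch of that tail (resize/crop omitted; the 0.5
# mean/std are the IMAGENET_STANDARD constants used as defaults above):
import numpy as np

image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)  # fake HWC photo

pixels = image.astype(np.float32) * (1 / 255)  # do_rescale
pixels = (pixels - 0.5) / 0.5                  # do_normalize
pixels = pixels.transpose(2, 0, 1)             # ChannelDimension.FIRST
print(pixels.shape)  # (3, 300, 400)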
| 447 |
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
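# For orientation, one input record looks roughly like this (only the two
# fields read above matter; the question text is illustrative, not from the
# actual file):
dpr_record = {
    "question": "who sings does he love me with reba",
    "positive_ctxs": [{"title": "Does He Love You", "text": "..."}],
    # "hard_negative_ctxs", "answers", ... are present in the raw data but unused here
}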
| 693 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
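# Effect of the _LazyModule indirection (a sketch; assumes transformers with
# torch installed): nothing heavy is imported until an attribute is touched.
import transformers.models.biogpt as biogpt

model_cls = biogpt.BioGptForCausalLM  # the real submodule import happens here
print(model_cls.__name__)  # BioGptForCausalLM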
| 556 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters, up to ``max_perimeter``, generated by the recurrence
    below (the sequence 16, 50, 196, 722, ... of almost-equilateral Heronian
    triangle perimeters)."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 693 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def snake_case ( a_ : List[str] , a_ : Any , a_ : Dict=8 ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCamelCase_ : int = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
super().__init__()
self.register_modules(
unet=_a , scheduler=_a , movq=_a , )
UpperCamelCase_ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if latents is None:
UpperCamelCase_ : Union[str, Any] = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCamelCase_ : Any = latents.to(_a )
UpperCamelCase_ : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def _UpperCAmelCase ( self , __lowerCAmelCase=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCamelCase_ : str = torch.device(F"cuda:{gpu_id}" )
UpperCamelCase_ : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
def _UpperCAmelCase ( self , __lowerCAmelCase=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCamelCase_ : str = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCamelCase_ , UpperCamelCase_ : List[Any] = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
# We'll offload the last model manually.
UpperCamelCase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 5_12 , __lowerCAmelCase = 5_12 , __lowerCAmelCase = 1_00 , __lowerCAmelCase = 4.0 , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ):
UpperCamelCase_ : int = self._execution_device
UpperCamelCase_ : str = guidance_scale > 1.0
if isinstance(_a , _a ):
UpperCamelCase_ : Dict = torch.cat(_a , dim=0 )
UpperCamelCase_ : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_a , _a ):
UpperCamelCase_ : Any = torch.cat(_a , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase_ : List[Any] = image_embeds.repeat_interleave(_a , dim=0 )
UpperCamelCase_ : Any = negative_image_embeds.repeat_interleave(_a , dim=0 )
UpperCamelCase_ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
self.scheduler.set_timesteps(_a , device=_a )
UpperCamelCase_ : Any = self.scheduler.timesteps
UpperCamelCase_ : Union[str, Any] = self.unet.config.in_channels
UpperCamelCase_ , UpperCamelCase_ : Dict = downscale_height_and_width(_a , _a , self.movq_scale_factor )
# create initial latent
UpperCamelCase_ : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ : Dict = {"""image_embeds""": image_embeds}
UpperCamelCase_ : List[str] = self.unet(
sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
if do_classifier_free_guidance:
UpperCamelCase_ , UpperCamelCase_ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase_ , UpperCamelCase_ : Tuple = noise_pred.chunk(2 )
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] = variance_pred.chunk(2 )
UpperCamelCase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase_ : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase_ , UpperCamelCase_ : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ : str = self.scheduler.step(
_a , _a , _a , generator=_a , )[0]
# post-processing
UpperCamelCase_ : Any = self.movq.decode(_a , force_not_quantize=_a )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCamelCase_ : int = image * 0.5 + 0.5
UpperCamelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCamelCase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase_ : str = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
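# Condensed from the EXAMPLE_DOC_STRING above (checkpoint ids taken from that
# docstring; CUDA assumed): the prior produces image embeddings, the decoder
# pipeline renders them.
from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior").to("cuda")
out = pipe_prior("red cat, 4k photo")

pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder").to("cuda")
image = pipe(
    image_embeds=out.image_embeds,
    negative_image_embeds=out.negative_image_embeds,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]
image.save("cat.png")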
| 208 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =None
# Automatically constructed
SCREAMING_SNAKE_CASE__ ="dict"
SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Optional[int]:
return self.pa_type
def __lowerCAmelCase ( self, _a ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_a, _a ):
return {"bytes": None, "path": value}
elif isinstance(_a, _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__SCREAMING_SNAKE_CASE = BytesIO()
sf.write(_a, value["array"], value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__SCREAMING_SNAKE_CASE = token_per_repo_id or {}
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
__SCREAMING_SNAKE_CASE = array.T
if self.mono:
__SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
__SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("bytes" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("path" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
return array_cast(_a, self.pa_type )
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a, "rb" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
return bytes_
__SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
__SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(_a, self.pa_type )
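# In practice this feature backs datasets' audio columns; a usage sketch with
# the public API (the wav path is a placeholder -- decoding happens lazily on
# row access and resamples to the requested rate):
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))

sample = ds[0]["audio"]         # decode_example() runs here
print(sample["sampling_rate"])  # 16000
print(sample["array"].shape)    # 1-D float array (mono=True collapses channels)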
| 693 | 0 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
UpperCamelCase_ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
UpperCamelCase_ = get_tests_dir("""fixtures/vocab.json""")
UpperCamelCase_ = get_tests_dir("""fixtures""")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =0
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_a , _a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Union[str, Any] =WavaVecaConfig()
lowercase : Optional[Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_a )
processor.save_pretrained(_a )
lowercase : Optional[Any] =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_a , os.path.join(_a , _a ) )
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
lowercase : str =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Optional[Any] =WavaVecaFeatureExtractor()
lowercase : List[Any] =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Union[str, Any] =WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in tokenizer
with open(os.path.join(_a , _a ) , '''r''' ) as f:
lowercase : Tuple =json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
lowercase : Tuple =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =WavaVecaFeatureExtractor()
lowercase : str =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : List[str] =WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in feature extractor
with open(os.path.join(_a , _a ) , '''r''' ) as f:
lowercase : Union[str, Any] =json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
lowercase : Optional[int] =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[Any] =WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_a )
# copy relevant files
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write('''{}''' )
lowercase : int =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
lowercase : str =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
lowercase : Optional[int] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
lowercase : Any =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
lowercase : Union[str, Any] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
lowercase : List[Any] =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase : Optional[int] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a , use_fast=_a )
lowercase : Optional[Any] =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoProcessor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase : Any =CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Dict =os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : List[str] =CustomTokenizer(_a )
lowercase : List[str] =CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_a )
lowercase : Dict =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")
    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 92 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 693 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
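# Hedged migration sketch (added for illustration; not part of the original
# file). It spells out the replacement import the warning above recommends;
# the checkpoint id and the image/mask variables are assumptions:
#
#     from diffusers import StableDiffusionInpaintPipeline
#
#     pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#     result = pipe(prompt="a cat on a bench", image=init_image, mask_image=mask_image).images[0]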
| 660 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
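
# Illustrative sanity check, added for this write-up (not part of the original
# module). It assumes `bin_exp_mod(a, d, n)` behaves like pow(a, d, n); call it
# manually, e.g. from a REPL, since running the module itself prompts for input.
def _demo_is_prime_big() -> None:
    # 97 and 7919 are prime; 100 and 561 (a Carmichael number) are composite.
    assert is_prime_big(97) and is_prime_big(7919)
    assert not is_prime_big(100) and not is_prime_big(561)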
| 693 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation with the Karras et al. (2022) stochastic sampler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
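
# Hedged usage sketch (added for illustration; not part of the pipeline file).
# The checkpoint id below is an assumption — any UNet2DModel trained as a
# VE-style score model should slot in:
#
#     from diffusers import UNet2DModel, KarrasVeScheduler
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # assumed id
#     scheduler = KarrasVeScheduler()
#     pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]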
| 414 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a Gabor kernel of size `ksize` x `ksize` (bumped to odd if even)."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
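
# Illustrative check, added for this write-up (not part of the original script):
# an even ksize is bumped to the next odd value, and at the kernel center the
# exponential term is exp(0), so the value reduces to cos(psi).
def _demo_gabor_kernel() -> None:
    kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
    assert kernel.shape == (11, 11)
    # center pixel: px = py = 0, so exp(0) * cos(0) == 1
    assert abs(kernel[5, 5] - 1.0) < 1e-6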
| 693 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    """Return the two roots of ax^2 + bx + c = 0, as reals when they are real."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
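
# Worked example, added for illustration (not part of the original module):
# x^2 - 3x + 2 = (x - 1)(x - 2), so the discriminant is 1 and both roots are real.
def _demo_quadratic_roots() -> None:
    assert quadratic_roots(a=1, b=-3, c=2) == (2.0, 1.0)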
| 101 |
def sylvester(number: int) -> int:
    """Return the number-th term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
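
# Worked example, added for illustration (not part of the original module).
# Since lower * upper + 1 == (num - 1) * num + 1, the recurrence is
# S(n) = S(n-1)^2 - S(n-1) + 1, giving 2, 3, 7, 43, 1807, ...
def _demo_sylvester() -> None:
    assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]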
| 693 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature for a word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 39 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
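
# Hedged usage sketch (added for illustration; not part of the test file). The
# same pipeline the tests exercise can be driven directly; the image path is an
# assumption:
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#     # -> list of {"score": ..., "label": ...} dicts sorted by score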
| 693 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, update_vector = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
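
# Illustrative note, added for this write-up (not part of the original module):
# with p = 0.5 the expected number of levels for n keys is about log2(n), which
# is what gives the expected O(log n) search cost. A quick manual check:
def _demo_skip_list() -> None:
    skip_list = SkipList()
    for i in range(16):
        skip_list.insert(i, str(i))
    assert skip_list.find(7) == "7"
    assert skip_list.find(99) is None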
| 227 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a complete game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
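
# Worked example, added for illustration (not part of the original module): a
# complete tree of height 2 over four leaves, where the maximizer picks
# max(min(3, 5), min(2, 9)) == 3.
def _demo_minimax() -> None:
    scores = [3, 5, 2, 9]
    height = math.log(len(scores), 2)
    assert minimax(0, 0, True, scores, height) == 3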
| 693 | 0 |
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological ordering, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 352 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # Turn each byte into its two-digit hex representation and concatenate.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check the length - valid base16 data has an even number of hex digits
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
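
# Illustrative round trip, added for this write-up (not part of the original
# module): "Hello World!" encodes to its uppercase hex digits and decodes back.
def _demo_base16() -> None:
    assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"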
| 693 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_attention_slicing_forward_pass(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 347 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of distinct prime factors of a value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of `n` consecutive integers that each have `n`
    distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of `n` consecutive integers
    with `n` distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
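
# Worked example, added for illustration (not part of the original module):
# 644 = 2^2 * 7 * 23, and 644, 645, 646 is the first run of three consecutive
# integers with three distinct prime factors each (Project Euler 47).
def _demo_distinct_prime_factors() -> None:
    assert unique_prime_factors(644) == {2, 7, 23}
    assert run(3) == [644, 645, 646]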
| 693 | 0 |
def kth_permutation(k: int, n: int) -> list:
    """Find the k-th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time."""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
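
# Worked example, added for illustration (not part of the original module):
# with n = 4 there are 4! = 24 permutations; index 0 is the identity ordering,
# and index 10 works out to [1, 3, 0, 2] via repeated divmod by factorials.
def _demo_kth_permutation() -> None:
    assert kth_permutation(0, 4) == [0, 1, 2, 3]
    assert kth_permutation(10, 4) == [1, 3, 0, 2]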
| 447 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
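

# --- Illustrative helper (an assumption-flagged sketch, not part of the original script) ---
# The checks above follow the usual checkpoint-conversion recipe: run one forward
# pass, compare the output shape exactly, then compare a handful of reference values
# within a tolerance. A minimal, hypothetical version of that pattern:
import torch


def verify_output(output, expected_shape, expected_slice, atol=1e-4):
    # Shape must match exactly; values only need to agree within `atol`.
    assert output.shape == expected_shape, f"shape mismatch: {output.shape} vs {expected_shape}"
    # A handful of reference values is enough to catch a bad weight conversion.
    assert torch.allclose(output.flatten()[: expected_slice.numel()], expected_slice.flatten(), atol=atol)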
| 693 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
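

# --- Illustrative usage (not part of the original file; torch.nn.Linear is a stand-in) ---
# instantiate_from_config builds an object from a {"target", "params"} dict, so any
# importable class works:
if __name__ == "__main__":
    example_config = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
    example_layer = instantiate_from_config(example_config)  # -> nn.Linear(4, 2)
    print(example_layer)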
| 556 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
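

if __name__ == "__main__":
    # Illustrative check (not part of the original module): constructing the shim
    # should emit a FutureWarning while otherwise behaving like CLIPImageProcessor.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        CLIPFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)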
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
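
# --- Illustrative note (not part of the original module) ---
# With the _LazyModule registration above, a consumer pays no torch/tf/flax import
# cost until a backend-specific symbol is actually touched, e.g. (hypothetical):
#
#     from transformers import XGLMConfig   # cheap: configuration module only
#     from transformers import XGLMModel    # triggers the torch-backed import lazily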
| 208 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Returns the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Returns the sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
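
# --- Worked example (not part of the original module) ---
# 220 and 284 form the classic amicable pair, so each should map to the other:
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220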
| 693 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
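

# --- Illustrative sketch (an assumption-flagged reimplementation, not accelerate's actual code) ---
# The behaviour these tests pin down can be reproduced with a simple halving loop:
# retry the wrapped function, cutting batch_size in half after every OOM-style error.
def find_executable_batch_size_sketch(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as err:
                if "out of memory" not in str(err):
                    raise  # unrelated errors propagate untouched
                batch_size //= 2
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper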
| 92 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
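
    # --- Worked example (not part of the original module) ---
    # PV = nRT with R = 0.0821 L*atm/(mol*K): 3 mol at 300 K in 0.82 L exerts
    # 3 * 0.0821 * 300 / 0.82 ~= 90 atm; 4 mol in 8 L with n-factor 2 gives 1 N.
    assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90
    assert molarity_to_normality(nfactor=2, moles=4, volume=8) == 1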
| 693 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
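

# --- Illustrative usage (not part of the original test file) ---
# The pipeline exercised above can be driven directly; this mirrors the slow test
# and downloads the default OWL-ViT checkpoint on first use:
if __name__ == "__main__":
    detector = pipeline("zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )
    print(predictions)  # list of {"score", "label", "box": {xmin, ymin, xmax, ymax}} dicts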
| 660 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
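

# --- Illustrative usage (an assumption-flagged sketch, not part of the original test file) ---
# End-to-end span filling with the public checkpoint these tests target
# (downloads model weights on first use):
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
    model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
    ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
    print(tokenizer.batch_decode(model.generate(ids, max_length=20)))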
| 693 | 0 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
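
    # --- Worked example (not part of the original module) ---
    # Kadane-style sanity check: the best contiguous sum in the classic sequence
    # -2,1,-3,4,-1,2,1,-5,4 is 6, coming from the slice [4, -1, 2, 1].
    assert SubArray("-2,1,-3,4,-1,2,1,-5,4").solve_sub_array() == 6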
| 414 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str) -> None:
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
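

# --- Illustrative demo (not part of the original script) ---
# The rename pass in to_tf_var_name applies its (pattern, replacement) tuples in
# order; dense/attention weights are additionally transposed because torch stores
# Linear weights as (out, in) while TF kernels are (in, out). Applying the same
# three core rules by hand shows the effect:
def _demo_rename(name: str) -> str:
    for patt, repl in (("layer.", "layer_"), (".", "/"), ("weight", "kernel")):
        name = name.replace(patt, repl)
    return "bert/" + name


assert _demo_rename("encoder.layer.0.attention.self.query.weight") == "bert/encoder/layer_0/attention/self/query/kernel"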
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
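
# --- Illustrative sketch (not part of the original module) ---
# _LazyModule defers the heavy backend imports; the same effect can be sketched
# with module-level __getattr__ (PEP 562), resolving names on first access.
# Kept commented out here so it cannot interfere with the sys.modules swap above:
#
#     import importlib
#
#     _LAZY = {"RoFormerConfig": ".configuration_roformer"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(name)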
| 101 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding=None,
        truncation=False,
        max_length=None,
        return_tensors=None,
        sampling_rate=None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
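

if __name__ == "__main__":
    # --- Worked numbers (not part of the original module; values chosen for illustration) ---
    # With sampling_rate=24_000, chunk_length_s=1.0 and overlap=0.25, the properties
    # above give chunk_length = int(1.0 * 24_000) = 24_000 samples and
    # chunk_stride = max(1, int((1.0 - 0.25) * 24_000)) = 18_000 samples.
    fe = EncodecFeatureExtractor(sampling_rate=24_000, chunk_length_s=1.0, overlap=0.25)
    assert fe.chunk_length == 24_000
    assert fe.chunk_stride == 18_000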
| 693 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 39 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
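

# --- Illustrative usage (not part of the original module; downloads pretrained weights) ---
# The predictor-corrector loop above is driven through the usual diffusers entry point:
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=2000).images[0]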
| 693 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
def __UpperCamelCase ( self : Dict) -> str:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = '''abeja/gpt-neox-japanese-2.7b'''
_lowerCAmelCase:Optional[int] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_lowerCAmelCase:Tuple = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_lowerCAmelCase:Optional[Any] = GPTNeoXJapaneseTokenizer.from_pretrained(_a)
_lowerCAmelCase:Optional[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(_a)
_lowerCAmelCase:str = []
for prompt in prompts:
_lowerCAmelCase:int = tokenizer(_a ,return_tensors='''pt''').input_ids
_lowerCAmelCase:List[Any] = model.generate(_a ,max_length=50)
_lowerCAmelCase:str = tokenizer.batch_decode(_a ,skip_special_tokens=_a)
predicted_outputs += generated_string
self.assertListEqual(_a ,_a)
| 227 |
def _A ( __snake_case :int = 400_0000 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__snake_case )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = b, a + b
return sum(__snake_case )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 0 |
import argparse
import json
import subprocess
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Any = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
SCREAMING_SNAKE_CASE : str = subprocess.run(__snake_case , shell=__snake_case , stdout=subprocess.PIPE )
SCREAMING_SNAKE_CASE : Optional[int] = output.stdout.decode('utf-8' )
SCREAMING_SNAKE_CASE : Any = json.loads(__snake_case )
SCREAMING_SNAKE_CASE : Tuple = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__snake_case )
    # save the results so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(__snake_case ) )
if len(__snake_case ) > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def __a ( __lowerCAmelCase ) -> str:
return values.split(',' )
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
_lowerCamelCase : List[Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 352 |
from __future__ import annotations
_snake_case : str = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_snake_case : Optional[int] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _A ( __snake_case :list[float] ) -> list[float]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = len(__snake_case )
for i in range(__snake_case ):
__SCREAMING_SNAKE_CASE = -1
for j in range(i + 1 , __snake_case ):
if arr[i] < arr[j]:
__SCREAMING_SNAKE_CASE = arr[j]
break
result.append(__snake_case )
return result
def _A ( __snake_case :list[float] ) -> list[float]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for i, outer in enumerate(__snake_case ):
__SCREAMING_SNAKE_CASE = -1
for inner in arr[i + 1 :]:
if outer < inner:
__SCREAMING_SNAKE_CASE = inner
break
result.append(__snake_case )
return result
def _A ( __snake_case :list[float] ) -> list[float]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = len(__snake_case )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [-1] * arr_size
for index in reversed(range(__snake_case ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__SCREAMING_SNAKE_CASE = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_snake_case : Optional[Any] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
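# --- Editorial sketch (added): the stack-based variant above scans from the
# right and pops values that can no longer be anyone's next-greater element, so
# every item is pushed and popped at most once (O(n) overall, versus O(n^2) for
# the two nested-loop variants). A self-contained version with hypothetical names:
def next_greater_sketch(values: list) -> list:
    result = [-1] * len(values)
    stack: list = []
    for i in reversed(range(len(values))):
        while stack and stack[-1] <= values[i]:
            stack.pop()  # dominated values can never be a later answer
        if stack:
            result[i] = stack[-1]
        stack.append(values[i])
    return result


assert next_greater_sketch([2, 1, 3]) == [3, 3, -1]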
| 693 | 0 |
'''simple docstring'''
def _a ( __lowerCAmelCase : int ):
"""simple docstring"""
if n == 1 or not isinstance(__snake_case , __snake_case ):
return 0
elif n == 2:
return 1
else:
snake_case__ : str = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _a ( __lowerCAmelCase : int ):
"""simple docstring"""
snake_case__ : Optional[Any] = 0
snake_case__ : Union[str, Any] = 2
while digits < n:
index += 1
snake_case__ : int = len(str(fibonacci(__snake_case ) ) )
return index
def _a ( __lowerCAmelCase : int = 10_00 ):
"""simple docstring"""
return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
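# --- Editorial sketch (added): the search above recomputes the whole sequence
# for every candidate index; a single incremental scan is linear. A
# self-contained version with hypothetical names:
def first_fib_index_with_digits(n_digits: int) -> int:
    a, b, index = 0, 1, 1  # b tracks F(index) in 1-based indexing
    while len(str(b)) < n_digits:
        a, b = b, a + b
        index += 1
    return index


assert first_fib_index_with_digits(3) == 12  # F(12) == 144 is the first 3-digit term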
| 347 |
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> Any:
__SCREAMING_SNAKE_CASE = data
__SCREAMING_SNAKE_CASE = None
def __repr__( self ) -> str:
return f'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
def __init__( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = None
def __iter__( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(_a ) for item in self] )
def __getitem__( self, _a ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, _a, _a ) -> None:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
__SCREAMING_SNAKE_CASE = self.head
for _ in range(_a ):
__SCREAMING_SNAKE_CASE = current.next
__SCREAMING_SNAKE_CASE = data
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(len(self ), _a )
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(0, _a )
def __lowerCAmelCase ( self, _a, _a ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
__SCREAMING_SNAKE_CASE = Node(_a )
if self.head is None:
__SCREAMING_SNAKE_CASE = new_node
elif index == 0:
__SCREAMING_SNAKE_CASE = self.head # link new_node to head
__SCREAMING_SNAKE_CASE = new_node
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = new_node
def __lowerCAmelCase ( self ) -> None: # print every node data
print(self )
def __lowerCAmelCase ( self ) -> Any:
return self.delete_nth(0 )
def __lowerCAmelCase ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self, _a = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
__SCREAMING_SNAKE_CASE = self.head # default first node
if index == 0:
__SCREAMING_SNAKE_CASE = self.head.next
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self ) -> bool:
return self.head is None
def __lowerCAmelCase ( self ) -> None:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.head
while current:
# Store the current node's next node.
__SCREAMING_SNAKE_CASE = current.next
# Make the current node's next point backwards
__SCREAMING_SNAKE_CASE = prev
# Make the previous node be the current node
__SCREAMING_SNAKE_CASE = current
# Make the current node the next node (to progress iteration)
__SCREAMING_SNAKE_CASE = next_node
# Return prev in order to put the head at the end
__SCREAMING_SNAKE_CASE = prev
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__SCREAMING_SNAKE_CASE = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__SCREAMING_SNAKE_CASE = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__SCREAMING_SNAKE_CASE = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__SCREAMING_SNAKE_CASE = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
from doctest import testmod
testmod()
__SCREAMING_SNAKE_CASE = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
__SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip()
print("New list:" )
print(__snake_case )
print(f'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
| 693 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCamelCase__ ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__snake_case ):
            requests.request('GET' , 'https://huggingface.co' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def lowerCamelCase__ ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('GET' , 'https://huggingface.co' )
def lowerCamelCase__ ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__snake_case ):
            http_head('https://huggingface.co' )
| 447 |
import argparse
import json
from tqdm import tqdm
def _A ( ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=__snake_case , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=__snake_case , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=__snake_case , help="where to store parsed gold_data_path file" , )
__SCREAMING_SNAKE_CASE = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
__SCREAMING_SNAKE_CASE = json.load(__snake_case )
for dpr_record in tqdm(__snake_case ):
__SCREAMING_SNAKE_CASE = dpr_record["question"]
__SCREAMING_SNAKE_CASE = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(__snake_case ) + "\n" )
if __name__ == "__main__":
main()
| 693 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( __SCREAMING_SNAKE_CASE ):
def __init__(self, *SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Dict:
super().__init__(*_a, **_a )
UpperCAmelCase_: Optional[Any] = eval_examples
UpperCAmelCase_: Optional[int] = post_process_function
def __snake_case (self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_ = "eval" ) -> List[Any]:
UpperCAmelCase_: List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase_: Optional[int] = self.get_eval_dataloader(_a )
UpperCAmelCase_: Optional[int] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_: Union[str, Any] = self.compute_metrics
UpperCAmelCase_: int = None
UpperCAmelCase_: List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase_: int = time.time()
try:
UpperCAmelCase_: int = eval_loop(
_a, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=_a, metric_key_prefix=_a, )
finally:
UpperCAmelCase_: List[Any] = compute_metrics
UpperCAmelCase_: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a, _a, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase_: str = self.post_process_function(_a, _a, output.predictions )
UpperCAmelCase_: Union[str, Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
UpperCAmelCase_: int = metrics.pop(_a )
metrics.update(output.metrics )
else:
UpperCAmelCase_: int = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase_: int = self.callback_handler.on_evaluate(self.args, self.state, self.control, _a )
return metrics
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_ = "test" ) -> Tuple:
UpperCAmelCase_: Dict = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_: Optional[Any] = self.compute_metrics
UpperCAmelCase_: str = None
UpperCAmelCase_: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase_: Union[str, Any] = time.time()
try:
UpperCAmelCase_: List[Any] = eval_loop(
_a, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=_a, metric_key_prefix=_a, )
finally:
UpperCAmelCase_: Union[str, Any] = compute_metrics
UpperCAmelCase_: str = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a, _a, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase_: Tuple = self.post_process_function(_a, _a, output.predictions, """predict""" )
UpperCAmelCase_: str = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
UpperCAmelCase_: Union[str, Any] = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=_a )
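# --- Editorial note (added): evaluate() and predict() above share one pattern:
# compute_metrics is temporarily detached so the raw evaluation loop only gathers
# predictions, speed metrics are derived from wall-clock time and the effective
# batch size, and the post_process_function hook (e.g. mapping start/end logits
# back to answer spans) runs before metrics are computed and prefixed.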
| 556 |
def _A ( __snake_case :int = 10**9 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase =['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
UpperCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
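# --- Editorial note (added): this sample follows the lazy-import pattern used
# across transformers: the module is swapped for a _LazyModule that defers the
# sentencepiece-backed tokenizer import until first attribute access, while the
# TYPE_CHECKING branch keeps static type checkers happy. The placeholder renames
# above collapse the usual `_import_structure` dict into `UpperCamelCase`.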
| 208 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =None
# Automatically constructed
SCREAMING_SNAKE_CASE__ ="dict"
SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Optional[int]:
return self.pa_type
def __lowerCAmelCase ( self, _a ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_a, _a ):
return {"bytes": None, "path": value}
elif isinstance(_a, _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__SCREAMING_SNAKE_CASE = BytesIO()
sf.write(_a, value["array"], value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
                    # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
                    # If the PCM bytes are already present, reuse them instead of re-reading the file
__SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__SCREAMING_SNAKE_CASE = token_per_repo_id or {}
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
__SCREAMING_SNAKE_CASE = array.T
if self.mono:
__SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
__SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("bytes" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("path" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
return array_cast(_a, self.pa_type )
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a, "rb" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
return bytes_
__SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
__SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(_a, self.pa_type )
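# --- Editorial sketch (added): hedged usage of the feature above, following its
# encode_example branches (a plain string is treated as a path and stored with
# bytes=None); the file path is hypothetical:
#
#   >>> feature = Audio(sampling_rate=16_000)
#   >>> feature.encode_example("path/to/clip.wav")
#   {'bytes': None, 'path': 'path/to/clip.wav'}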
| 693 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
UpperCamelCase_ = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,)
SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),)
def __lowerCAmelCase ( self, **_a ) -> str:
__SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00}
config.update(**_a )
return config
def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self, _a=0, **_a ) -> int:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self, **_a ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_a, "set_timesteps" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ):
__SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __lowerCAmelCase ( self ) -> str:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.full_loop()
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 693 | 0 |
'''simple docstring'''
# Implementation of pigeonhole sort
def lowerCamelCase__ ( A_ ):
UpperCAmelCase_ = min(__snake_case ) # min() finds the minimum value
UpperCAmelCase_ = max(__snake_case ) # max() finds the maximum value
UpperCAmelCase_ = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
UpperCAmelCase_ = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__snake_case , __snake_case ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
UpperCAmelCase_ = 0
for count in range(__snake_case ):
while holes[count] > 0:
holes[count] -= 1
UpperCAmelCase_ = count + min_val
i += 1
def lowerCamelCase__ ( ):
UpperCAmelCase_ = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__snake_case )
print("Sorted order is:" , " ".join(__snake_case ) )
if __name__ == "__main__":
main()
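# --- Editorial sketch (added): pigeonhole sort is O(n + range) and only suits
# integer keys with a small spread. A self-contained version with hypothetical
# names, returning a new list instead of sorting in place:
def pigeonhole_sort_sketch(values: list) -> list:
    lo, hi = min(values), max(values)
    holes = [0] * (hi - lo + 1)  # one counting hole per possible value
    for v in values:
        holes[v - lo] += 1
    out: list = []
    for offset, count in enumerate(holes):
        out.extend([lo + offset] * count)  # emit each value `count` times
    return out


assert pigeonhole_sort_sketch([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]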
| 660 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __snake_case :List[Any] , __snake_case :Union[str, Any]=1000 ) -> int:
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__SCREAMING_SNAKE_CASE = n - 1
__SCREAMING_SNAKE_CASE = 0
while d % 2 == 0:
        d //= 2  # integer halving; float division would break the modular exponentiation
exp += 1
# n - 1=d*(2**exp)
__SCREAMING_SNAKE_CASE = 0
while count < prec:
__SCREAMING_SNAKE_CASE = random.randint(2 , n - 1 )
__SCREAMING_SNAKE_CASE = bin_exp_mod(__snake_case , __snake_case , __snake_case )
if b != 1:
__SCREAMING_SNAKE_CASE = True
for _ in range(__snake_case ):
if b == n - 1:
__SCREAMING_SNAKE_CASE = False
break
__SCREAMING_SNAKE_CASE = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case : int = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
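# --- Editorial note (added): the loop above is the Miller-Rabin probabilistic
# primality test: write n - 1 = d * 2**exp with d odd, then for each random base
# check base**d (mod n) and its repeated squarings for a non-trivial square root
# of 1. Each round catches a composite with probability at least 3/4, so `prec`
# rounds leave a false-positive chance of at most 4**(-prec).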
| 693 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
snake_case : Tuple = (DEISMultistepScheduler,)
snake_case : Dict = (("""num_inference_steps""", 2_5),)
def snake_case_ (self , **lowerCAmelCase__ ):
_UpperCAmelCase : Union[str, Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**_a )
return config
def snake_case_ (self , lowerCAmelCase__=0 , **lowerCAmelCase__ ):
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop("""num_inference_steps""" , _a )
_UpperCAmelCase : Dict = self.dummy_sample
_UpperCAmelCase : List[Any] = 0.1 * sample
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Any = self.get_scheduler_config(**_a )
_UpperCAmelCase : int = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
_UpperCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
_UpperCAmelCase : Optional[Any] = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
_UpperCAmelCase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase : Any = sample, sample
for t in range(_a , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase : Tuple = scheduler.step(_a , _a , _a , **_a ).prev_sample
_UpperCAmelCase : Union[str, Any] = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ (self ):
pass
def snake_case_ (self , lowerCAmelCase__=0 , **lowerCAmelCase__ ):
_UpperCAmelCase : str = dict(self.forward_default_kwargs )
_UpperCAmelCase : Dict = kwargs.pop("""num_inference_steps""" , _a )
_UpperCAmelCase : Optional[int] = self.dummy_sample
_UpperCAmelCase : Tuple = 0.1 * sample
_UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
_UpperCAmelCase : Tuple = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ (self , lowerCAmelCase__=None , **lowerCAmelCase__ ):
if scheduler is None:
_UpperCAmelCase : List[Any] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config(**_a )
_UpperCAmelCase : Optional[int] = scheduler_class(**_a )
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : Any = self.get_scheduler_config(**_a )
_UpperCAmelCase : Union[str, Any] = scheduler_class(**_a )
_UpperCAmelCase : Optional[int] = 1_0
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Optional[int] = model(_a , _a )
_UpperCAmelCase : Dict = scheduler.step(_a , _a , _a ).prev_sample
return sample
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : Union[str, Any] = kwargs.pop("""num_inference_steps""" , _a )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**_a )
_UpperCAmelCase : List[str] = self.dummy_sample
_UpperCAmelCase : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(_a , """set_timesteps""" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a , """set_timesteps""" ):
_UpperCAmelCase : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase : int = scheduler.timesteps[5]
_UpperCAmelCase : Optional[Any] = scheduler.timesteps[6]
_UpperCAmelCase : List[Any] = scheduler.step(_a , _a , _a , **_a ).prev_sample
_UpperCAmelCase : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ (self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase : Tuple = DEISMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase : Optional[Any] = self.full_loop(scheduler=_a )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
_UpperCAmelCase : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase : Optional[int] = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase : int = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase : int = self.full_loop(scheduler=_a )
_UpperCAmelCase : int = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case_ (self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_a )
def snake_case_ (self ):
self.check_over_configs(thresholding=_a )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , algorithm_type="""deis""" , solver_order=_a , solver_type=_a , )
def snake_case_ (self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def snake_case_ (self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , )
_UpperCAmelCase : Tuple = self.full_loop(
solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , )
assert not torch.isnan(_a ).any(), "Samples have nan numbers"
def snake_case_ (self ):
self.check_over_configs(lower_order_final=_a )
self.check_over_configs(lower_order_final=_a )
def snake_case_ (self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=_a , time_step=0 )
def snake_case_ (self ):
_UpperCAmelCase : Any = self.full_loop()
_UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case_ (self ):
_UpperCAmelCase : Any = self.full_loop(prediction_type="""v_prediction""" )
_UpperCAmelCase : Any = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def snake_case_ (self ):
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 )
_UpperCAmelCase : Any = scheduler_class(**_a )
_UpperCAmelCase : List[str] = 1_0
_UpperCAmelCase : Union[str, Any] = self.dummy_model()
_UpperCAmelCase : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Optional[int] = model(_a , _a )
_UpperCAmelCase : int = scheduler.step(_a , _a , _a ).prev_sample
assert sample.dtype == torch.floataa
| 414 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__SCREAMING_SNAKE_CASE = ksize + 1
__SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
__SCREAMING_SNAKE_CASE = x - ksize // 2
__SCREAMING_SNAKE_CASE = y - ksize // 2
# degree to radiant
__SCREAMING_SNAKE_CASE = theta / 180 * np.pi
__SCREAMING_SNAKE_CASE = np.cos(_theta )
__SCREAMING_SNAKE_CASE = np.sin(_theta )
# get kernel x
__SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py
# get kernel y
__SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py
# fill kernel
__SCREAMING_SNAKE_CASE = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
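# --- Editorial note (added): each kernel entry above evaluates the real Gabor
# function exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2*pi*x'/lambd + psi),
# where (x', y') are the pixel offsets rotated by theta: sigma sets the Gaussian
# envelope, lambd the sinusoid wavelength, gamma the aspect ratio and psi the
# phase. Summing filter responses over several theta values, as in the __main__
# block, approximates an orientation-invariant edge map.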
| 693 | 0 |
from collections import defaultdict
from math import gcd
def a__ ( A__ = 1_5_0_0_0_0_0 ):
SCREAMING_SNAKE_CASE_ : Optional[int] = defaultdict(__snake_case )
SCREAMING_SNAKE_CASE_ : List[str] = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1, __snake_case, 2 ):
if gcd(__snake_case, __snake_case ) > 1:
continue
SCREAMING_SNAKE_CASE_ : List[str] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case, limit + 1, __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 101 |
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
__SCREAMING_SNAKE_CASE = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(__snake_case )
else:
__SCREAMING_SNAKE_CASE = sylvester(number - 1 )
__SCREAMING_SNAKE_CASE = num - 1
__SCREAMING_SNAKE_CASE = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase_ = {
'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class snake_case_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "retribert"
def __init__( self : str , _UpperCamelCase : Any=3_0_5_2_2 , _UpperCamelCase : Any=7_6_8 , _UpperCamelCase : Dict=8 , _UpperCamelCase : Any=1_2 , _UpperCamelCase : Dict=3_0_7_2 , _UpperCamelCase : Optional[int]="gelu" , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=5_1_2 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : Union[str, Any]=1e-12 , _UpperCamelCase : Dict=True , _UpperCamelCase : str=1_2_8 , _UpperCamelCase : List[Any]=0 , **_UpperCamelCase : Optional[Any] , ) ->Any:
super().__init__(pad_token_id=_a , **_a )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = share_encoders
snake_case_ = projection_dim | 39 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 693 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:List[str] = inspect.getfile(accelerate.test_utils)
_lowerCAmelCase:Tuple = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_lowerCAmelCase:List[str] = test_metrics
@require_cpu
def __UpperCamelCase ( self : int) -> Dict:
"""simple docstring"""
debug_launcher(self.test_metrics.main ,num_processes=1)
@require_cpu
def __UpperCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
debug_launcher(self.test_metrics.main)
@require_single_gpu
def __UpperCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def __UpperCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
print(F'Found {torch.cuda.device_count()} devices.')
_lowerCAmelCase:Union[str, Any] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(_a ,env=os.environ.copy())
| 227 |
from __future__ import annotations
import math
def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
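# --- Editorial note (added): for the scores in main() the tree has height
# log2(8) = 3 with alternating max/min levels:
#   depth 2 (max over leaf pairs): 90, 33, 65, 34423
#   depth 1 (min):                 33, 65
#   depth 0 (max):                 65
# so the printed optimal value is 65.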
| 693 | 0 |