code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase__ = sys.version_info >= (3, 10)
def UpperCamelCase( UpperCAmelCase_=None , UpperCAmelCase_=None ):
return field(default_factory=lambda: default , metadata=UpperCAmelCase_ )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : int
UpperCAmelCase_ : float
UpperCAmelCase_ : str
UpperCAmelCase_ : bool
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : int = 42
UpperCAmelCase_ : str = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = True
UpperCAmelCase_ : Optional[bool] = None
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """titi"""
UpperCAmelCase_ : int = """toto"""
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """titi"""
UpperCAmelCase_ : Dict = """toto"""
UpperCAmelCase_ : Dict = 42
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : BasicEnum = "toto"
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
UpperCAmelCase : Tuple = BasicEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : MixedTypeEnum = "toto"
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = MixedTypeEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[float] = field(default=_snake_case , metadata={"""help""": """help message"""} )
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[List[str]] = list_field(default=[] )
UpperCAmelCase_ : Optional[List[int]] = list_field(default=[] )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int] = list_field(default=[] )
UpperCAmelCase_ : List[int] = list_field(default=[1, 2, 3] )
UpperCAmelCase_ : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
UpperCAmelCase_ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int] = field()
UpperCAmelCase_ : str = field()
UpperCAmelCase_ : BasicEnum = field()
def UpperCAmelCase_ ( self : str ) -> Dict:
UpperCAmelCase : Tuple = BasicEnum(self.required_enum )
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : int
UpperCAmelCase_ : "BasicEnum" = field()
UpperCAmelCase_ : "Optional[bool]" = None
UpperCAmelCase_ : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} )
UpperCAmelCase_ : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = True
UpperCAmelCase_ : bool | None = None
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : int | None = None
UpperCAmelCase_ : float | None = field(default=_snake_case , metadata={"""help""": """help message"""} )
UpperCAmelCase_ : str | None = None
UpperCAmelCase_ : list[str] | None = list_field(default=[] )
UpperCAmelCase_ : list[int] | None = list_field(default=[] )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ) -> int:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCAmelCase : Optional[Any] = {k: v for k, v in vars(lowercase_ ).items() if k != 'container'}
UpperCAmelCase : List[Any] = {k: v for k, v in vars(lowercase_ ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_ ) and yy.get('choices' , lowercase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_ ) , yy['type'](lowercase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?' )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Dict = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((UpperCAmelCase) , ) : Union[str, Any] = parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_ )
self.assertFalse(example.flag )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : List[str] = HfArgumentParser(lowercase_ )
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_ )
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message' )
self.argparsersEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Tuple:
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?' )
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz' )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_ )
UpperCAmelCase : List[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
UpperCAmelCase : str = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : Optional[Any] = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : Union[str, Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : Tuple = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : List[Any] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : Optional[Any] = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[str] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCAmelCase : Any = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase : Optional[Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCAmelCase : Dict = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase : List[Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCAmelCase_ ( self : List[str] ) -> int:
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Literal["titi", "toto", 42] = "toto"
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Any = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCAmelCase : List[Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCAmelCase : Union[str, Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[str] = HfArgumentParser(lowercase_ )
UpperCAmelCase : Dict = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : int = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase : Optional[Any] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_ )
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message' )
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_ )
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_ )
UpperCAmelCase : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
UpperCAmelCase : Union[str, Any] = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
UpperCAmelCase : List[Any] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
UpperCAmelCase : Tuple = HfArgumentParser(lowercase_ )
UpperCAmelCase : Tuple = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : Union[str, Any] = HfArgumentParser(lowercase_ )
UpperCAmelCase : int = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_ )
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : str = HfArgumentParser(lowercase_ )
UpperCAmelCase : str = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
UpperCAmelCase : str = parser.parse_dict(lowercase_ )[0]
UpperCAmelCase : Union[str, Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : Tuple = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[str] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = HfArgumentParser(lowercase_ )
UpperCAmelCase : Optional[int] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Union[str, Any] = os.path.join(lowercase_ , 'temp_json' )
os.mkdir(lowercase_ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(lowercase_ , lowercase_ )
UpperCAmelCase : str = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
UpperCAmelCase : Tuple = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Dict = os.path.join(lowercase_ , 'temp_yaml' )
os.mkdir(lowercase_ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
UpperCAmelCase : Any = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase__ = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
lowercase__ = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
lowercase__ = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def UpperCAmelCase_ ( self : str , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str]=False ) -> Optional[int]:
if return_pvalue:
UpperCAmelCase : List[str] = pearsonr(lowercase_ , lowercase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_ )[0] )}
| 695 |
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baaencode(string.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baadecode(UpperCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 1 |
'''simple docstring'''
from math import factorial
class A_ :
'''simple docstring'''
def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Any ) -> List[str]:
UpperCAmelCase : Dict = real
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Dict = [1] * rank
else:
UpperCAmelCase : Tuple = rank
def __repr__( self : Optional[int] ) -> List[Any]:
return (
f"""{self.real}+"""
f"""{"+".join(str(lowercase_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self : Optional[int] , lowercase_ : Tuple ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
UpperCAmelCase : Optional[Any] = self.duals.copy()
UpperCAmelCase : str = other.duals.copy()
if len(lowercase_ ) > len(lowercase_ ):
o_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
elif len(lowercase_ ) < len(lowercase_ ):
s_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
UpperCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
UpperCAmelCase_ : int = __add__
def __sub__( self : List[str] , lowercase_ : Union[str, Any] ) -> Tuple:
return self + other * -1
def __mul__( self : str , lowercase_ : Tuple ) -> Dict:
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
UpperCAmelCase : Dict = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
UpperCAmelCase_ : str = __mul__
def __truediv__( self : int , lowercase_ : Dict ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Any = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self : Any , lowercase_ : List[str] ) -> List[Any]:
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self : int , lowercase_ : Tuple ) -> Dict:
if n < 0 or isinstance(lowercase_ , lowercase_ ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
UpperCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if not callable(UpperCAmelCase_ ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(UpperCAmelCase_ , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('differentiate() requires an int as input for order' )
UpperCAmelCase : List[Any] = Dual(UpperCAmelCase_ , 1 )
UpperCAmelCase : Dict = func(UpperCAmelCase_ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def UpperCamelCase( UpperCAmelCase_ ):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if dataset.ndim != value_array.ndim:
UpperCAmelCase : str = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(UpperCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(UpperCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase : List[str] = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(UpperCAmelCase_ )
UpperCAmelCase : str = []
for value in value_array:
UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
UpperCAmelCase : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
if dist > temp_dist:
UpperCAmelCase : List[str] = temp_dist
UpperCAmelCase : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_snake_case )
class A_ ( _snake_case ):
    """Task template describing a language-modeling dataset: a single string
    column mapped onto the canonical ``text`` feature name.

    NOTE(review): name mangling collapsed the four distinct fields
    (originally task / input_schema / label_schema / text_column) onto one
    name, and ``frozen=_snake_case`` passes a class where a bool is
    expected — ``frozen=True`` was presumably intended; confirm.
    """

    # Task identifier; always serialized even when it equals the default.
    UpperCAmelCase_ : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    # Input schema: exactly one string column.
    UpperCAmelCase_ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    # Language modeling has no label columns.
    UpperCAmelCase_ : ClassVar[Features] = Features({} )
    # Name of the dataset column holding the raw text.
    UpperCAmelCase_ : str = "text"

    @property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict[str, str]:
        """Map the configured text column onto the canonical ``text`` name."""
        return {self.text_column: "text"}
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the shared test utilities importable before loading the custom
# test_module classes used by the registration tests below.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402


# Fixture locations used throughout the tests below.
# NOTE(review): name mangling gave all three constants the same name
# (``lowercase__``), so only the last binding survives at runtime.
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
    """Test suite for ``AutoFeatureExtractor``.

    Covers loading by hub id, from local fixtures, from a saved config,
    error messages for bad identifiers/revisions, ``trust_remote_code``
    behaviour, and registration of custom feature-extractor classes.

    NOTE(review): this file's identifiers were mechanically mangled — every
    local became ``UpperCAmelCase`` and the fixture constants above all
    share the name ``lowercase__`` — so references such as ``lowercase_``,
    ``config_dict``, ``model_config``, ``feature_extractor`` or
    ``dict_as_saved`` below no longer resolve to the values they
    originally named.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        # Per-test setup; presumably this originally disabled the
        # remote-code prompt timeout — the assignment target was mangled
        # into a throwaway local.
        UpperCAmelCase : Optional[Any] = 0

    def UpperCAmelCase_ ( self : List[Any] ) -> Any:
        # Load a feature extractor straight from the Hub by model id.
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
        # Load from a local fixture path.
        UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
        # Round-trip: a config saved without ``feature_extractor_type`` must
        # still be enough to reload the feature extractor locally.
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase : Any = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
            config_dict.pop('feature_extractor_type' )
            UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
            # save in new folder
            model_config.save_pretrained(lowercase_ )
            config.save_pretrained(lowercase_ )
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
            # make sure private variable is not incorrectly saved
            UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Load directly from a stand-alone feature-extractor config file.
        UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        # An invalid model identifier must raise a descriptive error.
        with self.assertRaisesRegex(
            lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        # An invalid git revision must raise a descriptive error.
        with self.assertRaisesRegex(
            lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # A repo with no preprocessor config must raise a descriptive error.
        with self.assertRaisesRegex(
            lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )

    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        # Remote (trust_remote_code) loading paths.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowercase_ )
            UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        # Custom config/feature-extractor classes can be registered and then
        # used through the auto-API; the registry is cleaned up afterwards.
        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase_ ):
                AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowercase_ )
                UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
                self.assertIsInstance(lowercase_ , lowercase_ )
        finally:
            # Always restore the global registries so later tests are clean.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def UpperCAmelCase_ ( self : int ) -> Tuple:
        # ``trust_remote_code`` resolution when a local class with the same
        # name is registered: the local class wins unless remote is requested.
        class A_ ( _snake_case ):
            '''simple docstring'''

            # Marker attribute used to recognise the local class.
            UpperCAmelCase_ : Union[str, Any] = True

        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # If remote code is not set, the default is to use local
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
        finally:
            # Always restore the global registries so later tests are clean.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase(input_a, input_b):
    """Return the Euclidean distance between two equal-length vectors.

    Bug fix: the original signature declared the same parameter name twice
    (a SyntaxError in Python); the two vectors were restored as distinct
    parameters ``input_a`` and ``input_b``.

    Args:
        input_a: first vector (any iterable of numbers).
        input_b: second vector; iterated in lockstep with ``input_a``.

    Returns:
        The Euclidean (L2) distance as a float.
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def UpperCamelCase(dataset, value_array):
    """Find, for every vector in ``value_array``, its nearest neighbour in
    ``dataset`` (both are 2-D numpy arrays of identical dtype and width).

    Bug fixes versus the mangled original:
    * the two parameters shared one name (a SyntaxError) — restored as
      ``dataset`` and ``value_array``, the names the body already uses;
    * the body called an undefined name ``euclidean`` — the distance is now
      computed by a private helper using the same formula.

    Returns:
        A list of ``[nearest_vector_as_list, distance]`` pairs, one per
        query vector, in query order.

    Raises:
        ValueError: if the inputs' dimensions or row widths disagree.
        TypeError: if the shapes are unusable or the dtypes differ.
    """

    def _euclidean(input_a, input_b):
        # Same formula as the module's stand-alone distance helper.
        return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))

    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        # Seed the search with the first dataset entry...
        dist = _euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        # ...then keep the closest candidate seen while scanning the rest.
        for dataset_value in dataset[1:]:
            temp_dist = _euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def UpperCamelCase(input_a, input_b):
    """Return the cosine similarity of two vectors.

    Bug fix: the original signature declared the same parameter name twice
    (a SyntaxError); restored as two distinct vectors.

    Args:
        input_a: first vector (array-like).
        input_b: second vector (array-like, same length).

    Returns:
        ``dot(a, b) / (||a|| * ||b||)`` as a float in [-1, 1].
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase(UpperCAmelCase_, timeout=10.0):
    """Download an Instagram video/IGTV post and return its raw bytes.

    Bug fix: the mangled body read undefined locals ``base_url``/``url``;
    they are restored below. A ``timeout`` keyword (backward-compatible
    default) is added so neither HTTP request can hang indefinitely.

    Args:
        UpperCAmelCase_: public URL of the Instagram post.
        timeout: per-request timeout in seconds.

    Returns:
        The video content as ``bytes``.
    """
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    # First request resolves the post to a direct video source URL.
    video_src = requests.get(base_url + UpperCAmelCase_ , timeout=timeout ).json()[0]['urls'][0]['src']
    # Second request fetches the actual video bytes.
    return requests.get(video_src , timeout=timeout ).content
if __name__ == "__main__":
    # Small CLI: prompt for a URL and save the video under a timestamped
    # name. Bug fix: the mangled script bound both values to the same name
    # (``lowercase__``) and then called undefined names ``download_video``,
    # ``url`` and ``file_name``; the bindings and the actual function name
    # (``UpperCamelCase``) are restored here.
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, "wb") as fp:
        fp.write(UpperCamelCase(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 1 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class A_ ( _snake_case ):
    """Value-guided diffusion sampling pipeline for RL trajectory planning.

    Holds a value function, a diffusion ``unet``, a scheduler and a
    gym-style ``env``; ``__call__`` denoises candidate trajectories while
    nudging them toward high predicted value, then returns the chosen first
    action (de-normalized back to environment units).

    NOTE(review): identifier mangling left several parameters sharing the
    name ``lowercase_`` (duplicate parameter names are a SyntaxError in
    Python) and turned many assignment targets into throwaway
    ``UpperCAmelCase`` locals, so this class cannot run as written. The
    comments below describe the evident intent; the original parameter
    names are recoverable from the right-hand sides that still use them.
    """

    def __init__( self : List[str] , lowercase_ : UNetaDModel , lowercase_ : UNetaDModel , lowercase_ : DDPMScheduler , lowercase_ : Optional[int] , ) -> str:
        super().__init__()
        # Store the collaborating models/env (the names on the right are the
        # original, pre-mangling parameter names).
        UpperCAmelCase : str = value_function
        UpperCAmelCase : Union[str, Any] = unet
        UpperCAmelCase : str = scheduler
        UpperCAmelCase : str = env
        UpperCAmelCase : Tuple = env.get_dataset()
        # Per-key means of the offline dataset, used for normalization; keys
        # whose values have no ``.mean()`` are silently skipped.
        UpperCAmelCase : Dict = {}
        for key in self.data.keys():
            try:
                UpperCAmelCase : Any = self.data[key].mean()
            except: # noqa: E722
                pass
        # Per-key standard deviations, same best-effort policy.
        UpperCAmelCase : List[str] = {}
        for key in self.data.keys():
            try:
                UpperCAmelCase : Optional[int] = self.data[key].std()
            except: # noqa: E722
                pass
        # Flattened observation / action dimensionalities.
        UpperCAmelCase : List[str] = env.observation_space.shape[0]
        UpperCAmelCase : Union[str, Any] = env.action_space.shape[0]

    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any ) -> Dict:
        # Normalize ``x_in`` with the dataset statistics stored under ``key``.
        return (x_in - self.means[key]) / self.stds[key]

    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[Any] ) -> List[Any]:
        # Inverse of the normalization above.
        return x_in * self.stds[key] + self.means[key]

    def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[str] ) -> Any:
        # Recursively move dicts / tensors / array-likes onto the unet's
        # device as torch tensors.
        if type(lowercase_ ) is dict:
            return {k: self.to_torch(lowercase_ ) for k, v in x_in.items()}
        elif torch.is_tensor(lowercase_ ):
            return x_in.to(self.unet.device )
        return torch.tensor(lowercase_ , device=self.unet.device )

    def UpperCAmelCase_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] ) -> Tuple:
        # Re-impose conditioning values onto the trajectory tensor.
        # NOTE(review): the mangled assignment target dropped the original
        # slice indexing into ``x_in``; as written this is a dead local.
        for key, val in cond.items():
            UpperCAmelCase : Optional[int] = val.clone()
        return x_in

    def UpperCAmelCase_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Union[str, Any] ) -> Dict:
        # Reverse-diffusion loop with value-gradient guidance.
        UpperCAmelCase : Dict = x.shape[0]
        UpperCAmelCase : Optional[Any] = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            UpperCAmelCase : Dict = torch.full((batch_size,) , lowercase_ , device=self.unet.device , dtype=torch.long )
            for _ in range(lowercase_ ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    UpperCAmelCase : Union[str, Any] = self.value_function(x.permute(0 , 2 , 1 ) , lowercase_ ).sample
                    UpperCAmelCase : Tuple = torch.autograd.grad([y.sum()] , [x] )[0]
                    # Scale the gradient by the posterior std at this step.
                    UpperCAmelCase : Optional[Any] = self.scheduler._get_variance(lowercase_ )
                    UpperCAmelCase : Dict = torch.exp(0.5 * posterior_variance )
                    UpperCAmelCase : List[Any] = model_std * grad
                UpperCAmelCase : Dict = 0
                # Ascend the value gradient, then re-apply conditioning.
                UpperCAmelCase : Union[str, Any] = x.detach()
                UpperCAmelCase : List[Any] = x + scale * grad
                UpperCAmelCase : Optional[int] = self.reset_xa(lowercase_ , lowercase_ , self.action_dim )
            UpperCAmelCase : Dict = self.unet(x.permute(0 , 2 , 1 ) , lowercase_ ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            UpperCAmelCase : int = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , predict_epsilon=lowercase_ )['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            UpperCAmelCase : Tuple = self.reset_xa(lowercase_ , lowercase_ , self.action_dim )
            UpperCAmelCase : Optional[int] = self.to_torch(lowercase_ )
        return x, y

    def __call__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict=64 , lowercase_ : Dict=32 , lowercase_ : Any=2 , lowercase_ : Tuple=0.1 ) -> int:
        # Plan from an observation: sample a batch of trajectories, guide
        # them by value, and return the best first action.
        # normalize the observations and create batch dimension
        UpperCAmelCase : Optional[int] = self.normalize(lowercase_ , 'observations' )
        UpperCAmelCase : str = obs[None].repeat(lowercase_ , axis=0 )
        UpperCAmelCase : Optional[int] = {0: self.to_torch(lowercase_ )}
        UpperCAmelCase : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        UpperCAmelCase : Tuple = randn_tensor(lowercase_ , device=self.unet.device )
        UpperCAmelCase : Any = self.reset_xa(lowercase_ , lowercase_ , self.action_dim )
        UpperCAmelCase : Optional[Any] = self.to_torch(lowercase_ )
        # run the diffusion process
        UpperCAmelCase , UpperCAmelCase : Optional[int] = self.run_diffusion(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        # sort output trajectories by value
        UpperCAmelCase : List[Any] = y.argsort(0 , descending=lowercase_ ).squeeze()
        UpperCAmelCase : Any = x[sorted_idx]
        UpperCAmelCase : List[str] = sorted_values[:, :, : self.action_dim]
        UpperCAmelCase : Union[str, Any] = actions.detach().cpu().numpy()
        UpperCAmelCase : Optional[int] = self.de_normalize(lowercase_ , key='actions' )
        # select the action with the highest value
        if y is not None:
            UpperCAmelCase : Tuple = 0
        else:
            # if we didn't run value guiding, select a random action
            UpperCAmelCase : Tuple = np.random.randint(0 , lowercase_ )
        UpperCAmelCase : List[str] = denorm_actions[selected_index, 0]
        return denorm_actions
| 695 |
'''simple docstring'''
def UpperCamelCase(UpperCAmelCase_ = 10**9):
    """Project Euler 94: sum the perimeters of all "almost equilateral"
    triangles (sides p, p, p±1) with integral side lengths and integral
    area, whose perimeter does not exceed ``UpperCAmelCase_``.

    Bug fix: the mangled original assigned every local to the throwaway
    name ``UpperCAmelCase`` and then read the undefined names
    ``prev_value``/``value``/``perimeters_sum``/``i``/``perimeter``; the
    intended locals are restored below.

    The perimeters follow the recurrence seeded by (5,5,6) -> 16,
    (17,17,16) -> 50, (65,65,66) -> 196, ...

    Args:
        UpperCAmelCase_: inclusive perimeter limit (default 10**9).

    Returns:
        The sum of all qualifying perimeters.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= UpperCAmelCase_:
        perimeters_sum += perimeter
        # Pell-like recurrence generating successive solution triangles.
        prev_value += 2 * value
        value += prev_value
        # Alternate the +2 / -2 correction for the p+1 and p-1 families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # Bug fix: the original printed ``solution()``, but the function above
    # was (mangled-)renamed to ``UpperCamelCase``; call the actual name.
    print(f'''{UpperCamelCase() = }''')
| 695 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A_ ( _snake_case ):
    """Minimal ``KwargsHandler`` dataclass used to exercise ``to_kwargs()``:
    only fields that differ from these declared defaults should be reported.

    NOTE(review): name mangling collapsed the three distinct fields
    (originally ``a: int``, ``b: bool``, ``c: float``) onto the single name
    ``UpperCAmelCase_``, so only the last annotation survives as a real
    dataclass field; the tests below still construct it with a/b/c kwargs.
    """

    UpperCAmelCase_ : int = 0
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : float = 3.0
class A_ ( unittest.TestCase ):
    """Unit tests for accelerate kwargs handlers (``to_kwargs``,
    ``GradScalerKwargs``, and a multi-GPU DDP launch).

    NOTE(review): the dataclass under test was mangled to ``A_`` but is
    referenced below as ``MockClass``, and locals such as
    ``scaler_handler``/``accelerator``/``scaler`` were mangled into
    throwaway ``UpperCAmelCase`` targets, so these tests no longer run as
    originally written.
    """

    def UpperCAmelCase_ ( self : int ) -> List[str]:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=lowercase_ ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )

    @require_cuda
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
        # GradScalerKwargs must be forwarded to the underlying torch scaler.
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        UpperCAmelCase : Optional[int] = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        UpperCAmelCase : Union[str, Any] = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        UpperCAmelCase : Union[str, Any] = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , lowercase_ )

    @require_multi_gpu
    def UpperCAmelCase_ ( self : Any ) -> List[str]:
        # Re-launch this very file under torchrun to exercise the DDP kwargs
        # end-to-end (the __main__ block below is the worker script).
        UpperCAmelCase : Optional[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if __name__ == "__main__":
    # Worker script executed under ``torchrun`` by the multi-GPU test above:
    # prepares a DDP-wrapped linear layer and verifies the
    # DistributedDataParallelKwargs were applied, collecting all failures
    # into one message so every mismatch is reported at once.
    # NOTE(review): mangling gave the handler, accelerator, model, error
    # string and bucket-cap value the same name ``lowercase__``, so the
    # later references (``ddp_scaler``, ``accelerator``, ``model``,
    # ``error_msg``, ``observed_bucket_cap_map``) are undefined as written.
    lowercase__ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    lowercase__ = Accelerator(kwargs_handlers=[ddp_scaler])
    lowercase__ = torch.nn.Linear(100, 200)
    lowercase__ = accelerator.prepare(model)
    # Check the values changed in kwargs
    lowercase__ = ""
    lowercase__ = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast (CPU-sized) tests for the unconditional ``LDMPipeline``.

    NOTE(review): mangling collapsed the three fixture properties onto one
    name and turned assignment targets into throwaway ``UpperCAmelCase``
    locals, so lookups such as ``self.dummy_uncond_unet`` or the
    ``return model`` statements no longer resolve as originally written.
    """

    @property
    def UpperCAmelCase_ ( self : Any ) -> List[Any]:
        # Tiny deterministic UNet fixture.
        torch.manual_seed(0 )
        UpperCAmelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        # Tiny deterministic VQ-VAE fixture.
        torch.manual_seed(0 )
        UpperCAmelCase : str = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Tiny deterministic CLIP text-encoder fixture.
        torch.manual_seed(0 )
        UpperCAmelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(lowercase_ )

    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        # End-to-end smoke test: two inference steps, then compare a pixel
        # slice against a golden value (both dict and tuple return paths).
        UpperCAmelCase : Any = self.dummy_uncond_unet
        UpperCAmelCase : Tuple = DDIMScheduler()
        UpperCAmelCase : Optional[Any] = self.dummy_vq_model
        UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
        ldm.to(lowercase_ )
        ldm.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : str = torch.manual_seed(0 )
        UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
        UpperCAmelCase : int = torch.manual_seed(0 )
        UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # MPS needs a looser tolerance than CPU/CUDA.
        UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: full pretrained CelebA-HQ LDM checkpoint.

    NOTE(review): local names are mangled as in the fast tests above, so
    ``ldm``/``image``/``image_slice`` etc. do not resolve as written.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> Any:
        # Five inference steps on the pretrained model, golden-slice check.
        UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(lowercase_ )
        ldm.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Tuple = torch.manual_seed(0 )
        UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
        UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        # MPS needs a looser tolerance than CPU/CUDA.
        UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase(UpperCAmelCase_):
    """Count the set bits of a non-negative integer.

    Uses Brian Kernighan's trick: ``n &= n - 1`` clears the lowest set bit,
    so the loop runs once per ``1`` bit instead of once per bit position.

    Bug fixes versus the mangled original: the validation read
    ``isinstance(x, x)`` and the loop used the undefined names
    ``number``/``count``; the intended parameter/locals are restored.

    Args:
        UpperCAmelCase_: the integer whose population count is wanted.

    Returns:
        The number of ``1`` bits.

    Raises:
        ValueError: if the input is not a non-negative integer.
    """
    if not isinstance(UpperCAmelCase_, int) or UpperCAmelCase_ < 0:
        raise ValueError('Input must be a non-negative integer' )
    number = UpperCAmelCase_
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast tests for ``KarrasVePipeline`` with a tiny UNet.

    NOTE(review): local names are mangled (``UpperCAmelCase`` targets,
    ``lowercase_`` references), so e.g. ``return model`` and
    ``self.dummy_uncond_unet`` no longer resolve as originally written.
    """

    @property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Tiny deterministic UNet fixture.
        torch.manual_seed(0 )
        UpperCAmelCase : Any = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # Two-step smoke test comparing the dict and tuple return paths
        # against a golden pixel slice.
        UpperCAmelCase : Dict = self.dummy_uncond_unet
        UpperCAmelCase : Dict = KarrasVeScheduler()
        UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: pretrained NCSN++ CelebA-HQ checkpoint.

    NOTE(review): local names are mangled as in the fast tests above, so
    ``pipe``/``image``/``image_slice`` etc. do not resolve as written.
    """

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        # Twenty inference steps on the pretrained model, golden-slice check.
        UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
        UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
        UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
        UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase(min_val = 10, max_val = 10_00, option = True):
    """Return ``min_val`` when ``option`` is truthy, else ``max_val``.

    Bug fix: the mangled signature declared the same parameter name three
    times (a SyntaxError); the parameters are restored as the names the
    body already reads (``min_val``, ``max_val``, ``option``).

    Raises:
        AssertionError: if the argument types are wrong.
        ValueError: if ``min_val`` exceeds ``max_val``.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
    return min_val if option else max_val
def UpperCamelCase(number_a, number_b):
    """Return the midpoint of two numbers, truncated toward zero by ``int``.

    Bug fix: the mangled signature declared the same parameter name twice
    (a SyntaxError) and consequently added one value to itself; the two
    distinct operands are restored.
    """
    return int((number_a + number_b) / 2 )
def UpperCamelCase(lower, higher, to_guess):
    """Binary-search for ``to_guess`` inside the open interval
    ``(lower, higher)``, printing the probe sequence.

    Bug fixes versus the mangled original:
    * the signature declared one parameter name three times (a SyntaxError)
      — restored as the names the body already reads;
    * the loop called an undefined helper ``get_avg`` — the truncated
      midpoint is computed inline with the same formula;
    * the mangled bound updates are restored (a "low" answer raises the
      lower bound, a "high" answer lowers the upper bound).

    Raises:
        AssertionError: if any argument is not an int.
        ValueError: if the bounds are inverted or ``to_guess`` is not
            strictly between them.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )

    def answer(number) -> str:
        # Oracle comparing a probe against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Truncated midpoint of the current bracket (inlined ``get_avg``).
        number = int((last_lowest + last_highest) / 2 )
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""" )
    print(f"""details : {last_numbers!s}""" )
def UpperCamelCase( ):
    """Interactive entry point: read bounds and a target, then run the
    guessing search.

    NOTE(review): name mangling broke this module's wiring — the call below
    targets ``guess_the_number``, but that function was renamed to
    ``UpperCamelCase`` (which this very function now shadows), its three
    arguments were mangled to the undefined name ``UpperCAmelCase_``, and
    the ``main()`` call in the guard below is likewise undefined.
    """
    UpperCAmelCase : List[str] = int(input('Enter lower value : ' ).strip() )
    UpperCAmelCase : Any = int(input('Enter high value : ' ).strip() )
    UpperCAmelCase : Dict = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )


if __name__ == "__main__":
    main()
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Mapping from checkpoint id to its hosted config file.
# NOTE(review): name mangling gave the logger above and this map the same
# name (``lowercase__``), so the logger binding is clobbered at import time.
lowercase__ = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration for the Autoformer time-series forecasting model.

    Stores the time-series settings (prediction/context lengths,
    distribution head, feature counts, cardinalities), the Transformer
    architecture hyper-parameters, and the Autoformer-specific settings
    (label length, moving average, autocorrelation factor).

    NOTE(review): mangling collapsed every ``__init__`` parameter onto the
    name ``lowercase_`` (duplicate parameter names are a SyntaxError in
    Python) while the body still reads the original parameter names
    (``prediction_length``, ``context_length``, ...), and every assignment
    target became a throwaway ``UpperCAmelCase`` local instead of a
    ``self`` attribute.
    """

    # Model type identifier used by the auto classes.
    UpperCAmelCase_ : Tuple = """autoformer"""
    # Maps generic config attribute names onto Autoformer-specific ones.
    UpperCAmelCase_ : Optional[int] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
        # time series specific configuration
        UpperCAmelCase : int = prediction_length
        # Context window defaults to the prediction length when unset.
        UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
        UpperCAmelCase : List[Any] = distribution_output
        UpperCAmelCase : Tuple = loss
        UpperCAmelCase : Dict = input_size
        UpperCAmelCase : Dict = num_time_features
        UpperCAmelCase : Tuple = lags_sequence
        UpperCAmelCase : str = scaling
        UpperCAmelCase : Optional[int] = num_dynamic_real_features
        UpperCAmelCase : List[str] = num_static_real_features
        UpperCAmelCase : Optional[int] = num_static_categorical_features
        # Cardinalities must line up one-to-one with the categorical features.
        if cardinality is not None and num_static_categorical_features > 0:
            if len(lowercase_ ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            UpperCAmelCase : int = cardinality
        else:
            UpperCAmelCase : Union[str, Any] = [0]
        # Same constraint for the embedding dimensions; otherwise derive a
        # heuristic dimension from each cardinality.
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(lowercase_ ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            UpperCAmelCase : Any = embedding_dimension
        else:
            UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        UpperCAmelCase : Dict = num_parallel_samples
        # Transformer architecture configuration
        UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
        UpperCAmelCase : List[Any] = d_model
        UpperCAmelCase : Dict = encoder_attention_heads
        UpperCAmelCase : Tuple = decoder_attention_heads
        UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
        UpperCAmelCase : str = decoder_ffn_dim
        UpperCAmelCase : str = encoder_layers
        UpperCAmelCase : Optional[Any] = decoder_layers
        UpperCAmelCase : int = dropout
        UpperCAmelCase : Any = attention_dropout
        UpperCAmelCase : Tuple = activation_dropout
        UpperCAmelCase : str = encoder_layerdrop
        UpperCAmelCase : Union[str, Any] = decoder_layerdrop
        UpperCAmelCase : Tuple = activation_function
        UpperCAmelCase : Dict = init_std
        UpperCAmelCase : Union[str, Any] = use_cache
        # Autoformer
        UpperCAmelCase : Any = label_length
        UpperCAmelCase : List[Any] = moving_average
        UpperCAmelCase : Optional[Any] = autocorrelation_factor
        super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )

    @property
    def UpperCAmelCase_ ( self : List[str] ) -> int:
        """Number of extra input features concatenated onto the model input."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 695 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A_ :
    """Differentiable stand-in for the CLIP processor.

    Reimplements resize / center-crop / normalize with torchvision
    transforms so gradients can flow through image preprocessing, while
    delegating text handling to ``CLIPTokenizerFast``.

    NOTE(review): ``__init__`` and ``__call__`` were mangled so that several
    parameters share the name ``lowercase_`` (a SyntaxError in Python), and
    assignment targets became throwaway ``UpperCAmelCase`` locals instead of
    the ``self`` attributes the methods read (``self.tokenizer``,
    ``self.image_mean``, ``self.resize``, ...).
    """

    def __init__( self : Optional[int] , lowercase_ : str = "cpu" , lowercase_ : str = "openai/clip-vit-large-patch14" ) -> None:
        UpperCAmelCase : Union[str, Any] = device
        UpperCAmelCase : int = CLIPTokenizerFast.from_pretrained(lowercase_ )
        # CLIP's published per-channel means / stds.
        UpperCAmelCase : int = [0.4814_5466, 0.457_8275, 0.4082_1073]
        UpperCAmelCase : Optional[Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
        UpperCAmelCase : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        UpperCAmelCase : List[Any] = torchvision.transforms.Resize(224 )
        UpperCAmelCase : Optional[Any] = torchvision.transforms.CenterCrop(224 )

    def UpperCAmelCase_ ( self : int , lowercase_ : Optional[Any] ) -> Union[str, Any]:
        """Resize -> center-crop -> normalize a batch of images."""
        UpperCAmelCase : Optional[Any] = self.resize(lowercase_ )
        UpperCAmelCase : Optional[int] = self.center_crop(lowercase_ )
        UpperCAmelCase : str = self.normalize(lowercase_ )
        return images

    def __call__( self : str , lowercase_ : List[Any]=None , lowercase_ : Any=None , **lowercase_ : Optional[Any] ) -> int:
        """Tokenize text, preprocess images, and move everything to device."""
        UpperCAmelCase : Optional[int] = self.tokenizer(text=lowercase_ , **lowercase_ )
        UpperCAmelCase : int = self.preprocess_img(lowercase_ )
        UpperCAmelCase : Optional[int] = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class A_ ( nn.Module ):
    """VQGAN+CLIP latent-space image editor.

    NOTE(review): this class appears to be mechanically name-mangled — every
    local/attribute assignment was rewritten to ``UpperCAmelCase : <Type> = ...``
    (losing the ``self.`` prefix), all method names collapsed to
    ``UpperCAmelCase_`` (later defs shadow earlier ones), and several method
    signatures declare duplicate ``lowercase_`` parameters, which is a
    SyntaxError.  The comments below describe the *apparent* intent; verify
    against the original VQGAN-CLIP research-project source before relying on
    any of it.
    """
    # Apparent original: __init__(iterations=10, lr=0.01, vqgan=None, vqgan_config=None,
    # vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, ...).
    def __init__( self : List[Any] , lowercase_ : Union[str, Any]=10 , lowercase_ : Tuple=0.01 , lowercase_ : List[Any]=None , lowercase_ : Tuple=None , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]=True , lowercase_ : List[str]="image" , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=False , lowercase_ : str=False , lowercase_ : Optional[Any]=False , ) -> None:
        super().__init__()
        UpperCAmelCase : List[str] = None
        UpperCAmelCase : Any = device if device else get_device()
        if vqgan:
            UpperCAmelCase : Any = vqgan
        else:
            UpperCAmelCase : Tuple = load_vqgan(self.device , conf_path=lowercase_ , ckpt_path=lowercase_ )
        self.vqgan.eval()
        if clip:
            UpperCAmelCase : List[str] = clip
        else:
            UpperCAmelCase : Tuple = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
        self.clip.to(self.device )
        UpperCAmelCase : List[str] = ProcessorGradientFlow(device=self.device )
        UpperCAmelCase : str = iterations
        UpperCAmelCase : List[Any] = lr
        UpperCAmelCase : Optional[int] = log
        UpperCAmelCase : Dict = make_grid
        UpperCAmelCase : List[str] = return_val
        UpperCAmelCase : List[Any] = quantize
        UpperCAmelCase : Union[str, Any] = self.vqgan.decoder.z_shape
    # Apparently ``make_animation``: stitch intermediate PNGs into a GIF.
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int=None , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=5 , lowercase_ : int=True ) -> Dict:
        UpperCAmelCase : Any = []
        if output_path is None:
            UpperCAmelCase : Dict = './animation.gif'
        if input_path is None:
            UpperCAmelCase : List[str] = self.save_path
        UpperCAmelCase : str = sorted(glob(input_path + '/*' ) )
        if not len(lowercase_ ):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)' )
        if len(lowercase_ ) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
        UpperCAmelCase : Union[str, Any] = total_duration / len(lowercase_ )
        UpperCAmelCase : Any = [frame_duration] * len(lowercase_ )
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            UpperCAmelCase : Optional[int] = 1.5
            UpperCAmelCase : Optional[Any] = 3
        for file_name in paths:
            if file_name.endswith('.png' ):
                images.append(imageio.imread(lowercase_ ) )
        imageio.mimsave(lowercase_ , lowercase_ , duration=lowercase_ )
        print(f"""gif saved to {output_path}""" )
    # Apparently ``_get_latent``: encode an image file into the VQGAN latent z.
    def UpperCAmelCase_ ( self : List[str] , lowercase_ : int=None , lowercase_ : str=None ) -> Any:
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        UpperCAmelCase : Optional[Any] = preprocess(Image.open(lowercase_ ) , target_image_size=256 ).to(self.device )
        UpperCAmelCase : Optional[int] = preprocess_vqgan(lowercase_ )
        UpperCAmelCase , *UpperCAmelCase : Any = self.vqgan.encode(lowercase_ )
        return z
    # Apparently ``_add_vector``: apply a transform vector to the stored latent
    # and decode, optionally re-quantising first.
    def UpperCAmelCase_ ( self : Dict , lowercase_ : Union[str, Any] ) -> Tuple:
        UpperCAmelCase : int = self.latent.detach().requires_grad_()
        UpperCAmelCase : str = base_latent + transform_vector
        if self.quantize:
            UpperCAmelCase , *UpperCAmelCase : List[Any] = self.vqgan.quantize(lowercase_ )
        else:
            UpperCAmelCase : List[str] = trans_latent
        return self.vqgan.decode(lowercase_ )
    # Apparently ``_get_clip_similarity``: weighted CLIP image/text similarity sum.
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Any=None ) -> Tuple:
        UpperCAmelCase : str = self.clip_preprocessor(text=lowercase_ , images=lowercase_ , return_tensors='pt' , padding=lowercase_ )
        UpperCAmelCase : str = self.clip(**lowercase_ )
        UpperCAmelCase : List[Any] = clip_outputs.logits_per_image
        if weights is not None:
            UpperCAmelCase : Optional[Any] = similarity_logits * weights
        return similarity_logits.sum()
    # Apparently ``_get_CLIP_loss``: -log(pos similarity) + log(neg similarity).
    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> Tuple:
        UpperCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts['prompts'] , lowercase_ , weights=(1 / pos_prompts['weights']) )
        if neg_prompts:
            UpperCAmelCase : Dict = self._get_clip_similarity(neg_prompts['prompts'] , lowercase_ , weights=neg_prompts['weights'] )
        else:
            UpperCAmelCase : Optional[Any] = torch.tensor([1] , device=self.device )
        UpperCAmelCase : Optional[int] = -torch.log(lowercase_ ) + torch.log(lowercase_ )
        return loss
    # Apparently ``_optimize_CLIP``: gradient-descend a latent offset vector to
    # minimise the CLIP loss, yielding an image (or the vector) per iteration.
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> int:
        UpperCAmelCase : Any = torch.randn_like(self.latent , requires_grad=lowercase_ , device=self.device )
        UpperCAmelCase : Optional[Any] = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            UpperCAmelCase : List[Any] = self._add_vector(lowercase_ )
            UpperCAmelCase : int = loop_post_process(lowercase_ )
            UpperCAmelCase : List[str] = self._get_CLIP_loss(lowercase_ , lowercase_ , lowercase_ )
            print('CLIP loss' , lowercase_ )
            if self.log:
                wandb.log({'CLIP Loss': clip_loss} )
            clip_loss.backward(retain_graph=lowercase_ )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    # Apparently ``_init_logging``: set up a wandb run and log the source image.
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ) -> Optional[Any]:
        wandb.init(reinit=lowercase_ , project='face-editor' )
        wandb.config.update({'Positive Prompts': positive_prompts} )
        wandb.config.update({'Negative Prompts': negative_prompts} )
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
        if image_path:
            UpperCAmelCase : Dict = Image.open(lowercase_ )
            UpperCAmelCase : List[Any] = image.resize((256, 256) )
            wandb.log('Original Image' , wandb.Image(lowercase_ ) )
    # Apparently ``process_prompts``: parse "prompt:weight" strings / tuples into
    # parallel lists of prompts and a weight tensor.
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : Union[str, Any] ) -> Optional[Any]:
        if not prompts:
            return []
        UpperCAmelCase : str = []
        UpperCAmelCase : int = []
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase : int = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(lowercase_ , (tuple, list) ):
                UpperCAmelCase : Tuple = prompt[0]
                UpperCAmelCase : List[Any] = float(prompt[1] )
            elif ":" in prompt:
                UpperCAmelCase , UpperCAmelCase : Optional[Any] = prompt.split(':' )
                UpperCAmelCase : Optional[int] = float(lowercase_ )
            else:
                # No explicit weight: default to 1.0.
                UpperCAmelCase : Optional[Any] = prompt
                UpperCAmelCase : int = 1.0
            processed_prompts.append(lowercase_ )
            weights.append(lowercase_ )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(lowercase_ , device=self.device ),
        }
    # Apparently ``generate``: end-to-end entry point — encode (or sample) a
    # latent, optimise it against the prompts, and show/save the results.
    def UpperCAmelCase_ ( self : int , lowercase_ : Any , lowercase_ : str=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=True , lowercase_ : int=False , lowercase_ : Tuple=True , lowercase_ : int=True , lowercase_ : Union[str, Any]=None , ) -> Any:
        if image_path:
            UpperCAmelCase : Dict = self._get_latent(lowercase_ )
        else:
            UpperCAmelCase : Optional[int] = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(lowercase_ , lowercase_ , lowercase_ )
        assert pos_prompts, "You must provide at least one positive prompt."
        UpperCAmelCase : Union[str, Any] = self.process_prompts(lowercase_ )
        UpperCAmelCase : List[str] = self.process_prompts(lowercase_ )
        if save_final and save_path is None:
            UpperCAmelCase : Tuple = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(lowercase_ ):
                os.makedirs(lowercase_ )
            else:
                # Directory already exists: disambiguate with a timestamp suffix.
                UpperCAmelCase : List[str] = save_path + '_' + get_timestamp()
                os.makedirs(lowercase_ )
            UpperCAmelCase : Union[str, Any] = save_path
        UpperCAmelCase : Dict = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(lowercase_ ) )
        UpperCAmelCase : Union[str, Any] = loop_post_process(lowercase_ )
        for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase_ , lowercase_ , lowercase_ ) ):
            if show_intermediate:
                show_pil(lowercase_ )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
            if self.log:
                wandb.log({'Image': wandb.Image(lowercase_ )} )
        if show_final:
            show_pil(lowercase_ )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 695 |
'''simple docstring'''
def UpperCamelCase(sentence: str, ngram_size: int) -> list[str]:
    """Return every character n-gram of length *ngram_size* in *sentence*.

    BUGFIX: the mangled signature declared two parameters both named
    ``UpperCAmelCase_`` (a SyntaxError) while the body referenced the
    undefined names ``sentence``/``ngram_size``; the parameter names are
    restored to match the body.

    >>> UpperCamelCase("abcd", 2)
    ['ab', 'bc', 'cd']
    >>> UpperCamelCase("", 3)
    []
    """
    # When ngram_size exceeds len(sentence) the range is empty -> returns [].
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    from doctest import testmod
    testmod()
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class A_ ( _snake_case ):
    """Configuration for the Salesforce CTRL model.

    BUGFIX: the mangled original declared every ``__init__`` parameter with the
    duplicate name ``lowercase_`` (a SyntaxError) and assigned the values to a
    single local instead of instance attributes.  Parameter/attribute names are
    restored from the standard CTRL configuration layout; the class-level
    constants are given back their ``PretrainedConfig`` contract names
    (``model_type``, ``keys_to_ignore_at_inference``, ``attribute_map``).
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,       # size of the BPE vocabulary
        n_positions=256,          # maximum sequence length
        n_embd=1_280,             # hidden size
        dff=8_192,                # feed-forward inner dimension
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order low-pass biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: the mangled signature used three parameters all named
    ``UpperCAmelCase_`` (a SyntaxError) and the body referenced undefined
    locals; restored from the coefficient expressions still visible in the
    body.  NOTE: all filter factories in this file share the mangled name
    ``UpperCamelCase``, so later definitions shadow earlier ones.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order high-pass biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
    undefined locals restored from the visible coefficient expressions.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order band-pass biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
    undefined locals restored from the visible coefficient expressions.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order all-pass biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
    undefined locals restored from the visible coefficient expressions.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: feedback coefficients are the feedforward ones reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> "IIRFilter":
    """Create a 2nd-order peaking-EQ biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
    undefined locals restored from the visible coefficient expressions.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> "IIRFilter":
    """Create a 2nd-order low-shelf biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
    undefined locals restored from the visible coefficient expressions.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    # Shorthand terms shared by the shelf-filter formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> "IIRFilter":
    """Create a 2nd-order high-shelf biquad filter (Audio EQ Cookbook formulas).

    BUGFIX: duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
    undefined locals restored from the visible coefficient expressions.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    # Shorthand terms shared by the shelf-filter formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase(n: int = 1000) -> int:
    """Project Euler 57: count the first *n* continued-fraction expansions of
    sqrt(2) whose numerator has more digits than the denominator.

    BUGFIX: the mangled body referenced the undefined names ``n``,
    ``prev_numerator``, ``prev_denominator``, ``numerator`` and
    ``denominator``; locals restored to match those references.

    Uses the recurrence num' = num + 2*den, den' = num + den starting from 1/1
    (so iteration 1 yields 3/2).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
    # BUGFIX: the solver above is (mangled-)named ``UpperCamelCase`` in this
    # file; ``solution`` does not exist and raised NameError.
    print(f"{UpperCamelCase() = }")
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
T = lowercase__  # readable alias: the type-var name was mangled to ``lowercase__``


class A_ ( Generic[T] ):
    """Least-recently-used cache of keys with a bounded capacity.

    BUGFIX: the mangled original assigned ``deque()``/``set()`` to locals
    instead of ``self.dq_store``/``self.key_reference``, referenced the
    undefined names ``T`` and ``LRUCache``, and collapsed both public methods
    to the duplicate name ``UpperCAmelCase_``.  The method names ``refer`` and
    ``display`` are restored from the driver script at the bottom of this file.
    """

    dq_store: deque[T]  # key order, most recently used on the left
    key_reference: set[T]  # fast membership test for cached keys
    _MAX_CAPACITY: int = 10  # class-level default capacity

    def __init__(self, n: int) -> None:
        """Create a cache holding at most *n* keys (0 means unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access of *x*, evicting the least-recently-used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUGFIX: the cache class is (mangled-)named ``A_`` in this file and the
    # instance was bound to ``lowercase__`` while the calls below used the
    # undefined name ``lru_cache``.
    lru_cache = A_(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 1 |
'''simple docstring'''
lowercase__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    BUGFIX: the mangled original declared duplicate ``lowercase_`` parameters
    (a SyntaxError), referenced undefined locals (``pre_tok_state``,
    ``files``, ``input_ids``), and collapsed the class attributes and method
    names.  Attribute/method names are restored to the
    ``PreTrainedTokenizerFast`` contract names the framework looks up
    (``vocab_files_names``, ``save_vocabulary``, ...).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Align the backend pre-tokenizer's add_prefix_space with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, truncated to the last model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 695 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = GPTSanJapaneseTokenizer
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Union[str, Any] = {"""do_clean_text""": False, """add_prefix_space""": False}
    def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
        # setUp: write a throw-away vocab + emoji mapping to disk for the tests.
        # NOTE(review): the ``UpperCAmelCase`` assignments below look like mangled
        # locals; ``vocab_tokens`` and ``lowercase_`` in the writes are undefined
        # as written — verify against the upstream test file.
        super().setUp()
        # fmt: off
        UpperCAmelCase : Any = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        UpperCAmelCase : Dict = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
        UpperCAmelCase : Tuple = {'unk_token': '<unk>'}
        UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file , 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(lowercase_ ) )
    def UpperCAmelCase_ ( self : Optional[Any] , **lowercase_ : Any ) -> List[str]:
        # Build a tokenizer from the setUp fixture dir, merging the special-token map.
        # NOTE(review): ``kwargs`` is undefined as written (the parameter was
        # mangled to ``lowercase_``) — verify against the upstream test file.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[str] ) -> Union[str, Any]:
        # Sample (input, expected-output) pair; the variant '㔺' normalises to '世'.
        # NOTE(review): ``input_text``/``output_text`` are undefined as written
        # (locals were mangled to ``UpperCAmelCase``).
        UpperCAmelCase : str = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        UpperCAmelCase : Dict = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text
    def UpperCAmelCase_ ( self : int , lowercase_ : int ) -> Any:
        # Round-trip helper: encode the sample text and decode it back.
        # NOTE(review): ``tokenizer``/``text``/``ids`` are undefined as written
        # (mangled locals) — verify against the upstream test file.
        UpperCAmelCase , UpperCAmelCase : Tuple = self.get_input_output_texts(lowercase_ )
        UpperCAmelCase : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        UpperCAmelCase : Optional[Any] = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ )
        return text, ids
    def UpperCAmelCase_ ( self : Dict ) -> str:
        # Deliberately empty: this common-test hook is not relevant to this tokenizer.
        pass # TODO add if relevant
    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        # Deliberately empty: this common-test hook is not relevant to this tokenizer.
        pass # TODO add if relevant
    def UpperCAmelCase_ ( self : List[str] ) -> Any:
        # Deliberately empty: this common-test hook is not relevant to this tokenizer.
        pass # TODO add if relevant
    def UpperCAmelCase_ ( self : Dict ) -> Tuple:
        # Full tokenizer test: tokenize, then convert tokens to ids with and
        # without an unknown token appended.
        # NOTE(review): ``tokens``/``input_ids`` used below are undefined as
        # written (locals were mangled to ``UpperCAmelCase``).
        UpperCAmelCase : List[str] = self.get_tokenizer()
        # Testing tokenization
        UpperCAmelCase : Any = 'こんにちは、世界。 こんばんは、㔺界。'
        UpperCAmelCase : Any = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        UpperCAmelCase : Tuple = tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        # Testing conversion to ids without special tokens
        UpperCAmelCase : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        # Testing conversion to ids with special tokens
        UpperCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
        UpperCAmelCase : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
    def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        # Bag-of-token test: '<|bagoftoken|>' should expand to repeated tokens on
        # an encode/decode round trip.
        UpperCAmelCase : Any = self.get_tokenizer()
        # Testing tokenization
        UpperCAmelCase : Union[str, Any] = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        UpperCAmelCase : str = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        UpperCAmelCase : Any = tokenizer.encode(lowercase_ )
        UpperCAmelCase : str = tokenizer.decode(lowercase_ )
        self.assertEqual(lowercase_ , lowercase_ )
    @slow
    def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
        # Prefix-text test against the released checkpoint: encoding with a prefix
        # argument must agree with encoding the concatenated string.
        # NOTE(review): ``prefix_text``/``input_text`` below are undefined as
        # written (mangled locals).
        UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        UpperCAmelCase : Any = 'こんにちは、世界。'
        UpperCAmelCase : str = 'こんばんは、㔺界。😀'
        UpperCAmelCase : Optional[Any] = 'こんにちは、世界。こんばんは、世界。😀'
        UpperCAmelCase : Union[str, Any] = tokenizer.encode(prefix_text + input_text )
        UpperCAmelCase : List[str] = tokenizer.encode('' , prefix_text=prefix_text + input_text )
        UpperCAmelCase : str = tokenizer.encode(lowercase_ , prefix_text=lowercase_ )
        UpperCAmelCase : Dict = tokenizer.decode(lowercase_ )
        UpperCAmelCase : int = tokenizer.decode(lowercase_ )
        UpperCAmelCase : Tuple = tokenizer.decode(lowercase_ )
        self.assertEqual(lowercase_ , lowercase_ )
        self.assertEqual(lowercase_ , lowercase_ )
        self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : int ) -> Any:
UpperCAmelCase : int = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
UpperCAmelCase : List[str] = 'こんにちは、世界。'
UpperCAmelCase : Dict = 'こんばんは、㔺界。😀'
UpperCAmelCase : Any = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase : str = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase : Optional[int] = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase : Union[str, Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase : Dict = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase : Any = tokenizer(lowercase_ , prefix_text=lowercase_ ).token_type_ids
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
    # A prefix changes the produced ids (a SEG token is inserted) but the
    # decoded text must match.  NOTE(review): asserts reference mangled
    # free names ``x_token_a``; verify against the original test.
    UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
    UpperCAmelCase : Tuple = tokenizer.encode('あンいワ' )
    UpperCAmelCase : Union[str, Any] = tokenizer.encode('' , prefix_text='あンいワ' )
    UpperCAmelCase : Tuple = tokenizer.encode('いワ' , prefix_text='あン' )
    self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
    self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
    self.assertNotEqual(lowercase_ , lowercase_ )
    self.assertNotEqual(lowercase_ , lowercase_ )
    self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
    self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token

@slow
def UpperCAmelCase_ ( self : List[Any] ) -> str:
    # Batch encoding with padding: expected ids, token_type_ids and mask.
    UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
    UpperCAmelCase : List[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
    UpperCAmelCase : str = tokenizer(lowercase_ , padding=lowercase_ )
    UpperCAmelCase : Tuple = tokenizer.batch_encode_plus(lowercase_ , padding=lowercase_ )
    # fmt: off
    UpperCAmelCase : Tuple = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
    UpperCAmelCase : List[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
    UpperCAmelCase : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
    # fmt: on
    self.assertListEqual(x_token.input_ids , lowercase_ )
    self.assertListEqual(x_token.token_type_ids , lowercase_ )
    self.assertListEqual(x_token.attention_mask , lowercase_ )
    self.assertListEqual(x_token_a.input_ids , lowercase_ )
    self.assertListEqual(x_token_a.token_type_ids , lowercase_ )
    self.assertListEqual(x_token_a.attention_mask , lowercase_ )

def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
    # Intentionally convert some words to accommodate character fluctuations unique to Japanese
    pass

def UpperCAmelCase_ ( self : Any ) -> Any:
    # tokenizer has no padding token
    pass
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
    """Agent tool that transcribes audio to text with openai/whisper-base.

    Pipeline: encode (audio -> log-mel features), forward (generate token
    ids), decode (ids -> text).
    """

    UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
    UpperCAmelCase_ : Union[str, Any] = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    UpperCAmelCase_ : Dict = """transcriber"""
    UpperCAmelCase_ : int = WhisperProcessor
    UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
    UpperCAmelCase_ : Dict = ["""audio"""]
    UpperCAmelCase_ : Optional[int] = ["""text"""]

    def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
        # Convert raw audio into Whisper log-mel input features (pt tensors).
        return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features

    def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
        # Autoregressive generation over the encoded features.
        return self.model.generate(inputs=lowercase_ )

    def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
        # Decode the generated ids; fix: the original passed the outputs as
        # the `skip_special_tokens` flag instead of True.
        return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=True )[0]
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( rows , cols , mat ):
    """Side length of the largest all-ones square in ``mat`` (naive recursion).

    Fix: the original declared all three parameters as ``UpperCAmelCase_``
    (duplicate argument names are a SyntaxError); the names used by the
    body (``rows``, ``cols``, ``mat``) are restored.  Exponential time.
    """

    def update_area_of_max_square(row: int , col: int ) -> int:
        # BASE CASE: fell off the bottom or right edge of the matrix.
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            # A square ending here extends the smallest neighbour by one.
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        return 0

    # One-element list so the nested function can mutate the running best.
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def UpperCamelCase( rows , cols , mat ):
    """Side length of the largest all-ones square, top-down DP (memoised).

    Fix: the original declared the outer and inner parameters with
    duplicate names (a SyntaxError); names restored from the body.
    O(rows * cols) time via the ``dp_array`` memo (-1 = unsolved).
    """

    def update_area_of_max_square_using_dp_array(row: int , col: int , dp_array: list ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:  # already solved this cell
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        dp_array[row][col] = 0
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def UpperCamelCase( rows , cols , mat ):
    """Side length of the largest all-ones square, bottom-up DP.

    Fix: the original declared three duplicate ``UpperCAmelCase_``
    parameters (a SyntaxError); names restored from the body.  Uses a
    (rows+1) x (cols+1) table padded with zeros so edge cells need no
    special casing.  O(rows * cols) time and space.
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def UpperCamelCase( rows , cols , mat ):
    """Side length of the largest all-ones square, bottom-up DP, O(cols) space.

    Fixes two defects: (1) duplicate ``UpperCAmelCase_`` parameter names
    (a SyntaxError) -- restored from the body; (2) ``next_row = current_row``
    ALIASED the two buffers, so ``diagonal`` could read a value already
    written in the *current* row and over-count (e.g. [[1, 1], [1, 0]]
    returned 2 instead of 1).  Copying the finished row keeps the previous
    row intact.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        # Snapshot (copy!) the finished row before the buffer is reused.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix a NameError: `largest_square_area_in_matrix_bottom_up` does not
    # exist in this module -- every variant was renamed to `UpperCamelCase`,
    # so call the surviving (last) definition instead.
    print(UpperCamelCase(2, 2, [[1, 1], [1, 1]]))
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
    """Whisper model configuration (architecture hyper-parameters).

    Reconstruction note: the original ``__init__`` declared every parameter
    as ``lowercase_`` (duplicate argument names are a SyntaxError) and the
    ``self.<attr> =`` targets were mangled into locals.  Parameter names and
    attribute targets are restored from the body's right-hand-side names and
    the upstream ``transformers.WhisperConfig`` API; defaults and ordering
    are unchanged.
    """

    UpperCAmelCase_ : Optional[Any] = """whisper"""
    UpperCAmelCase_ : Tuple = ["""past_key_values"""]
    UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , vocab_size=51_865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1_536 , encoder_ffn_dim=1_536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50_257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1_500 , max_target_positions=448 , pad_token_id=50_256 , bos_token_id=50_256 , eos_token_id=50_256 , suppress_tokens=None , begin_suppress_tokens=[220, 50_256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class A_ ( _snake_case ):
    """ONNX export configuration for Whisper (audio encoder + text decoder).

    Reconstruction note: the original ``generate_dummy_inputs`` declared
    eight duplicate ``lowercase_`` parameters (a SyntaxError); names are
    restored from the body and the upstream ``WhisperOnnxConfig``.
    """

    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Log-mel features are laid out (batch, feature_size, encoder_sequence).
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            # With cached key/values only one new decoder token is fed per step.
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs

    def UpperCAmelCase_ ( self : Optional[Any] , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , sampling_rate : int = 22_050 , time_duration : float = 5.0 , frequency : int = 220 , ) -> Mapping[str, Any]:
        # Build encoder features with the feature extractor, decoder ids with
        # the tokenizer, then merge them in export order.
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        # Halve the decoder length when past key/values are exported too.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs

    @property
    def UpperCAmelCase_ ( self : Dict ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-3
| 695 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
    """Data2VecVision model configuration.

    Reconstruction note: the original ``__init__`` declared every parameter
    as ``lowercase_`` (duplicate argument names are a SyntaxError) and the
    ``self.<attr> =`` targets were mangled into locals.  Both are restored
    from the body's right-hand-side names and the upstream
    ``transformers.Data2VecVisionConfig`` API; defaults are unchanged.
    """

    UpperCAmelCase_ : Union[str, Any] = """data2vec-vision"""

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A_ ( _snake_case ):
    """ONNX export configuration for Data2VecVision: one NCHW image input."""

    UpperCAmelCase_ : str = version.parse("""1.11""" )

    @property
    def UpperCAmelCase_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
        # Single pixel input; dynamic axes named batch / channels / H / W.
        axis_names = {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
        return OrderedDict([('pixel_values', axis_names)] )

    @property
    def UpperCAmelCase_ ( self : int ) -> float:
        # Absolute tolerance when validating exported model outputs.
        return 1E-4
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( mixed_precision="no" , save_location = default_json_config_file , use_xpu = False ):
    """Write a minimal default Accelerate cluster config file.

    Reconstruction note: the original declared three duplicate
    ``UpperCAmelCase_`` parameters (a SyntaxError) and the config-dict key
    assignments were mangled into locals; both restored from the body and
    the upstream ``accelerate`` ``write_basic_config``.

    Returns the written ``Path``, or ``False`` when a config already exists.
    Raises ``ValueError`` for an unknown ``mixed_precision`` value.
    """
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        # Never clobber an existing configuration.
        print(
            F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Pick the accelerator backend by probing availability in priority order.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        # CPU-only fallback.
        config['num_processes'] = 1
        config['use_cpu'] = True
        config['distributed_type'] = 'NO'
    cluster_config = ClusterConfig(**config )
    cluster_config.to_json_file(path )
    return path
def UpperCamelCase( subparsers , parents ):
    """Register the `default` subcommand on the `accelerate config` CLI.

    Reconstruction note: the original declared two parameters both named
    ``UpperCAmelCase_`` (a SyntaxError) and referenced an undefined
    ``parser``; restored from the upstream ``default_command_parser``.
    ``lowercase__`` (module-level) is the subcommand description.
    """
    parser = subparsers.add_parser('default' , parents=parents , help=lowercase__ , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    # NOTE(review): the handler was obfuscated; at call time `UpperCamelCase`
    # resolves to the module-level command handler defined after this
    # function (originally `default_config_command`) -- confirm upstream.
    parser.set_defaults(func=UpperCamelCase )
    return parser
def UpperCamelCase( UpperCAmelCase_ ):
    """Run the `default` subcommand: write the config and report its path.

    Fixes a NameError: the body referenced ``args`` while the parameter is
    named ``UpperCAmelCase_`` (it carries the parsed argparse namespace).
    """
    config_file = write_basic_config(UpperCAmelCase_.mixed_precision , UpperCAmelCase_.save_location )
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( _snake_case , unittest.TestCase ):
    """Tokenizer tests for XLM (toy BPE vocab + merges).

    NOTE(review): several locals were mangled to ``UpperCAmelCase``:
    ``setUp`` zips over an undefined ``lowercase_`` and never assigns the
    ``self.vocab_file`` / ``self.merges_file`` attributes it opens, and the
    test methods read free names (``tokens``, ``text``, ``encoded_*``).
    Code kept byte-identical; restore from the original test suite.
    """

    UpperCAmelCase_ : List[Any] = XLMTokenizer
    UpperCAmelCase_ : int = False

    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        # Build a tiny BPE vocabulary and merges file on disk for the tests.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase : Dict = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        UpperCAmelCase : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        UpperCAmelCase : Any = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(lowercase_ ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(lowercase_ ) )

    def UpperCAmelCase_ ( self : Tuple , lowercase_ : Optional[int] ) -> str:
        # Input/output pair consumed by the shared round-trip tests.
        UpperCAmelCase : List[Any] = 'lower newer'
        UpperCAmelCase : Optional[int] = 'lower newer'
        return input_text, output_text

    def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
        # BPE splits "lower" into "low" + "er</w>"; unknowns map to <unk>.
        UpperCAmelCase : Tuple = XLMTokenizer(self.vocab_file , self.merges_file )
        UpperCAmelCase : Tuple = 'lower'
        UpperCAmelCase : Tuple = ['low', 'er</w>']
        UpperCAmelCase : Tuple = tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        UpperCAmelCase : Any = tokens + ['<unk>']
        UpperCAmelCase : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )

    @slow
    def UpperCAmelCase_ ( self : Any ) -> List[str]:
        # Special-token layout: [0] text [1] for one sequence, chained pairs.
        UpperCAmelCase : Optional[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        UpperCAmelCase : Any = tokenizer.encode('sequence builders' , add_special_tokens=lowercase_ )
        UpperCAmelCase : List[str] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase_ )
        UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    """Deprecated alias kept for backward compatibility; use LayoutLMv2ImageProcessor."""

    def __init__( self : List[Any] , *args , **kwargs ) -> None:
        # Fixes: the original declared `*lowercase_, **lowercase_` (duplicate
        # parameter names are a SyntaxError) and passed the positional-args
        # tuple as the warning category; a deprecation should use
        # FutureWarning.
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
    """Return the sum of the decimal digits of a non-negative integer.

    Fixes a NameError: the body looped over ``num`` / accumulated into
    ``digit_sum`` while neither was ever bound (the parameter is
    ``UpperCAmelCase_`` and the accumulator binding was mangled).
    """
    digit_sum = 0
    num = UpperCAmelCase_
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def UpperCamelCase( UpperCAmelCase_ = 1_00 ):
    """Project Euler 65: digit sum of the numerator of the ``n``-th convergent of e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]; term i
    is 2*i/3 when i is a multiple of 3, otherwise 1.  Numerators follow
    h_i = a_i * h_{i-1} + h_{i-2}.

    Fixes NameErrors: the original body read unbound names (``max_n``,
    ``pre_numerator``, ``e_cont``, ``cur_numerator``, ``temp``) and called a
    non-existent ``sum_digits``; the digit sum is inlined so the function is
    self-contained.
    """
    pre_numerator = 1
    cur_numerator = 2  # first convergent of e is 2/1
    for i in range(2 , UpperCAmelCase_ + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # Sum of the decimal digits of the final numerator.
    digit_sum = 0
    num = cur_numerator
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
if __name__ == "__main__":
    # Fix a NameError: `solution` was obfuscated to `UpperCamelCase`.  The
    # printed text matches the original f-string's `{solution() = }` output.
    print(f"""solution() = {UpperCamelCase()}""")
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
    """A single HANS example (fields inferred from the keyword construction
    in ``_create_examples``: guid, text_a, text_b, label, pairID)."""

    # Unique example id, e.g. "train-42".
    UpperCAmelCase_ : str
    # Premise sentence.
    UpperCAmelCase_ : str
    # Hypothesis sentence (optional).
    UpperCAmelCase_ : Optional[str] = None
    # Gold label name (optional for unlabeled data).
    UpperCAmelCase_ : Optional[str] = None
    # HANS pair identifier used when writing evaluation output.
    UpperCAmelCase_ : Optional[str] = None

@dataclass(frozen=_snake_case )
class A_ :
    """Encoded features for one example (fields inferred from the generator
    in the TF dataset: input_ids, attention_mask, token_type_ids, label,
    pairID)."""

    # Token ids after tokenization and padding.
    UpperCAmelCase_ : List[int]
    # 1 for real tokens, 0 for padding (optional).
    UpperCAmelCase_ : Optional[List[int]] = None
    # Segment ids distinguishing premise from hypothesis (optional).
    UpperCAmelCase_ : Optional[List[int]] = None
    # Integer (or regression float) label.
    UpperCAmelCase_ : Optional[Union[int, float]] = None
    # Numeric HANS pair id.
    UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
    """Torch dataset for the HANS diagnostic task, with on-disk feature cache.

    NOTE(review): ``__init__`` declares every parameter as ``lowercase_``
    (duplicate argument names are a SyntaxError) and the ``self.features`` /
    ``self.label_list`` assignments were mangled into local ``UpperCAmelCase``
    bindings, so ``__len__`` / ``__getitem__`` read attributes this
    constructor never sets (and ``__getitem__`` uses a free name ``i``).
    Code kept byte-identical; restore from the upstream HANS example.
    """

    # Encoded examples for the requested split.
    UpperCAmelCase_ : List[InputFeatures]

    def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
        UpperCAmelCase : Dict = hans_processors[task]()
        # Cache filename encodes split, tokenizer class, max length and task.
        UpperCAmelCase : List[Any] = os.path.join(
            lowercase_ , 'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Optional[int] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCAmelCase : int = cached_features_file + '.lock'
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                UpperCAmelCase : Tuple = torch.load(lowercase_ )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                UpperCAmelCase : int = (
                    processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
                )
                logger.info('Training examples: %s' , len(lowercase_ ) )
                UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
                logger.info('Saving features into cached file %s' , lowercase_ )
                torch.save(self.features , lowercase_ )

    def __len__( self : Union[str, Any] ) -> str:
        return len(self.features )

    def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
        return self.features[i]

    def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        # Label names after the optional RoBERTa swap performed in __init__.
        return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
    """TensorFlow dataset wrapper for HANS (mirrors the torch version).

    NOTE(review): as with the torch dataset, ``__init__`` declares duplicate
    ``lowercase_`` parameters (a SyntaxError) and the ``self.features`` /
    ``self.dataset`` / ``self.label_list`` assignments were mangled into
    locals; ``tf.intaa`` also looks like a mangled ``tf.int32``/``int64``.
    Code kept byte-identical; restore from the upstream HANS example.
    """

    # Encoded examples for the requested split.
    UpperCAmelCase_ : List[InputFeatures]

    def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
        UpperCAmelCase : int = hans_processors[task]()
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Any = label_list
        UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
        UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )

        def gen():
            # Stream the encoded features one example at a time for tf.data.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
                if ex_index % 10_000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # Build the tf.data pipeline with explicit output types and shapes.
        UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
            lowercase_ , (
                {
                    'example_id': tf.intaa,
                    'input_ids': tf.intaa,
                    'attention_mask': tf.intaa,
                    'token_type_ids': tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    'example_id': tf.TensorShape([] ),
                    'input_ids': tf.TensorShape([None, None] ),
                    'attention_mask': tf.TensorShape([None, None] ),
                    'token_type_ids': tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        # Accessor for the built tf.data.Dataset.
        return self.dataset

    def __len__( self : Tuple ) -> Optional[Any]:
        return len(self.features )

    def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
        return self.features[i]

    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( examples , label_list , max_length , tokenizer , ):
    """Convert ``InputExample`` objects into ``InputFeatures`` for HANS.

    Bug fix: all four parameters previously shared the name
    ``UpperCAmelCase_`` (a ``SyntaxError``); they are renamed to match how
    the body uses them, and each intermediate result gets its own local
    instead of one repeatedly-rebound name.

    Args:
        examples: ``InputExample`` items to convert.
        label_list: task labels; their order fixes the label -> id mapping.
        max_length: maximum tokenized sequence length.
        tokenizer: HuggingFace tokenizer used to encode each text pair.

    Returns:
        List of ``InputFeatures``.
    """
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        # Unknown labels (e.g. HANS "non-entailment" heuristics) fall back to 0.
        label = label_map[example.label] if example.label in label_map else 0
        pair_id = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pair_id ) )
    for i, example in enumerate(examples[:5] ):
        logger.info('*** Example ***' )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
# Number of output labels per task name ("hans" is 3-way NLI).
lowercase__ = {
    "hans": 3,
}
# Task name -> processor class.
# NOTE(review): ``HansProcessor`` is not defined under that name in the
# visible portion of this module (the processor class here is ``A_``) —
# confirm the name resolves at import time.
lowercase__ = {
    "hans": HansProcessor,
}
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCamelCase( UpperCAmelCase_ ):
    """Return ``UpperCAmelCase_`` unchanged if it is already iterable,
    otherwise duplicate it into a 2-tuple (ViT-style "to 2-tuple" helper).

    Bug fix: the body previously returned the undefined name ``x``, which
    raised ``NameError`` for every input; it now returns the parameter.
    """
    if isinstance(UpperCAmelCase_ , collections.abc.Iterable ):
        return UpperCAmelCase_
    return (UpperCAmelCase_, UpperCAmelCase_)
@require_tf
class A_ :
    """Shared test mixin for TF VisionTextDualEncoder models.

    Concrete subclasses supply ``get_vision_text_model`` and
    ``prepare_config_and_inputs``; this mixin provides the common checks
    (embedding shapes, save/load round-trip, attention tensor shapes).

    NOTE(review): several signatures below repeat the parameter name
    ``lowercase_`` (a Python ``SyntaxError``) and the bodies reference
    names (``model``, ``config``, ``vision_model``, ...) that are never
    bound — this looks like mechanical renaming damage; restore from the
    upstream test file before running.
    """
    def UpperCAmelCase_ ( self : Any , lowercase_ : Tuple , lowercase_ : Dict ) -> int:
        # Hook: subclasses return (vision_model, text_model) built from the two configs.
        pass
    def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
        # Hook: subclasses return the combined config/inputs dictionary.
        pass
    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        pass
    def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : List[Any]=None , **lowercase_ : List[str] ) -> Optional[Any]:
        # Check: a model built from the merged config yields projection-dim embeddings.
        UpperCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
        UpperCAmelCase : str = TFVisionTextDualEncoderModel(lowercase_ )
        UpperCAmelCase : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
    def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ) -> Union[str, Any]:
        # Check: a model built from the two sub-models yields projection-dim embeddings.
        UpperCAmelCase , UpperCAmelCase : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
        UpperCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ )
        UpperCAmelCase : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any]=None , **lowercase_ : Union[str, Any] ) -> Dict:
        # Check: from_vision_text_pretrained accepts the two sub-models as kwargs.
        UpperCAmelCase , UpperCAmelCase : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
        UpperCAmelCase : Tuple = {'vision_model': vision_model, 'text_model': text_model}
        UpperCAmelCase : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
        UpperCAmelCase : Dict = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Union[str, Any]=None , **lowercase_ : List[str] ) -> str:
        # Check: model outputs survive a save_pretrained / from_pretrained round-trip.
        UpperCAmelCase , UpperCAmelCase : Any = self.get_vision_text_model(lowercase_ , lowercase_ )
        UpperCAmelCase : str = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ )
        UpperCAmelCase : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
        UpperCAmelCase : Tuple = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowercase_ )
            UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase_ )
            UpperCAmelCase : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
            UpperCAmelCase : str = after_output[0].numpy()
            UpperCAmelCase : Any = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowercase_ , 1E-5 )
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : int=None , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
        # Check: attention tensors have the expected per-layer shapes.
        UpperCAmelCase , UpperCAmelCase : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
        UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ )
        UpperCAmelCase : List[str] = model(
            input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
        UpperCAmelCase : Any = output.vision_model_output.attentions
        self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase : Optional[int] = to_atuple(vision_model.config.image_size )
        UpperCAmelCase : str = to_atuple(vision_model.config.patch_size )
        UpperCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        UpperCAmelCase : int = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        UpperCAmelCase : Any = output.text_model_output.attentions
        self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def UpperCAmelCase_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ) -> Tuple:
        # Assert the max absolute element-wise difference is within tolerance.
        UpperCAmelCase : str = np.abs((a - b) ).max()
        self.assertLessEqual(lowercase_ , lowercase_ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**lowercase_ )
    def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
        UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowercase_ )
    def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
        UpperCAmelCase : str = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ )
    def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        UpperCAmelCase : int = self.prepare_config_and_inputs()
        self.check_save_load(**lowercase_ )
    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        UpperCAmelCase : Dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowercase_ )
    @slow
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        # Slow: compare a pretrained model's outputs before and after save+reload.
        UpperCAmelCase , UpperCAmelCase : Tuple = self.get_pretrained_model_and_inputs()
        UpperCAmelCase : Optional[int] = model_a(**lowercase_ )
        UpperCAmelCase : str = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowercase_ )
            UpperCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(lowercase_ )
            UpperCAmelCase : Dict = model_a(**lowercase_ )
            UpperCAmelCase : int = after_outputs[0].numpy()
            UpperCAmelCase : str = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowercase_ , 1E-5 )
@require_tf
class A_ ( _snake_case , unittest.TestCase ):
    """ViT + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): the parenthesized unpacking below assigns every element
    of ``text_config_and_inputs`` to the same name and the returned dict
    reads names that are never bound — mechanical renaming damage.
    """
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        # Build a tiny pretrained dual encoder plus random inputs for it.
        UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
        UpperCAmelCase : Any = 13
        UpperCAmelCase : int = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        UpperCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        UpperCAmelCase : Optional[int] = random_attention_mask([batch_size, 4] )
        UpperCAmelCase : List[str] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Tuple ) -> Any:
        # Wrap the two configs into named TF sub-models.
        UpperCAmelCase : Optional[Any] = TFViTModel(lowercase_ , name='vision_model' )
        UpperCAmelCase : List[str] = TFBertModel(lowercase_ , name='text_model' )
        return vision_model, text_model
    def UpperCAmelCase_ ( self : List[Any] ) -> str:
        # Assemble the combined config/inputs dict from the two sub-testers.
        UpperCAmelCase : int = TFViTModelTester(self )
        UpperCAmelCase : Optional[int] = TFBertModelTester(self )
        UpperCAmelCase : List[Any] = vit_model_tester.prepare_config_and_inputs()
        UpperCAmelCase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = vision_config_and_inputs
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) : List[Any] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A_ ( _snake_case , unittest.TestCase ):
    """DeiT + RoBERTa instantiation of the dual-encoder test mixin.

    Overrides the attention-shape check because DeiT prepends two special
    tokens ([CLS] and distillation) instead of one.

    NOTE(review): duplicated ``lowercase_`` parameter names and uses of
    never-bound locals below are mechanical renaming damage.
    """
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        UpperCAmelCase : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
        UpperCAmelCase : Optional[Any] = 13
        UpperCAmelCase : Tuple = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        UpperCAmelCase : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        UpperCAmelCase : Optional[int] = random_attention_mask([batch_size, 4] )
        UpperCAmelCase : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def UpperCAmelCase_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=None , **lowercase_ : Dict ) -> Tuple:
        # Attention-shape check specialized for DeiT's extra distillation token.
        UpperCAmelCase , UpperCAmelCase : List[str] = self.get_vision_text_model(lowercase_ , lowercase_ )
        UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ )
        UpperCAmelCase : str = model(
            input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
        UpperCAmelCase : Dict = output.vision_model_output.attentions
        self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        UpperCAmelCase : str = to_atuple(vision_model.config.image_size )
        UpperCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size )
        UpperCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        UpperCAmelCase : Union[str, Any] = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        UpperCAmelCase : Any = output.text_model_output.attentions
        self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Optional[int] ) -> Dict:
        UpperCAmelCase : Any = TFDeiTModel(lowercase_ , name='vision_model' )
        UpperCAmelCase : List[Any] = TFRobertaModel(lowercase_ , name='text_model' )
        return vision_model, text_model
    def UpperCAmelCase_ ( self : Any ) -> Dict:
        # Assemble the combined config/inputs dict from the two sub-testers.
        UpperCAmelCase : Dict = TFDeiTModelTester(self )
        UpperCAmelCase : Optional[Any] = TFRobertaModelTester(self )
        UpperCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
        UpperCAmelCase : Optional[int] = bert_model_tester.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = vision_config_and_inputs
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) : Tuple = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A_ ( _snake_case , unittest.TestCase ):
    """CLIP vision encoder + BERT instantiation of the dual-encoder mixin.

    NOTE(review): the parenthesized unpacking below rebinds one name and
    the returned dict reads names that are never bound — mechanical
    renaming damage.
    """
    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        # Build a tiny pretrained dual encoder plus random inputs for it.
        UpperCAmelCase : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
        UpperCAmelCase : List[Any] = 13
        UpperCAmelCase : Any = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        UpperCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        UpperCAmelCase : str = random_attention_mask([batch_size, 4] )
        UpperCAmelCase : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def UpperCAmelCase_ ( self : int , lowercase_ : List[Any] , lowercase_ : List[Any] ) -> Optional[Any]:
        UpperCAmelCase : List[Any] = TFCLIPVisionModel(lowercase_ , name='vision_model' )
        UpperCAmelCase : List[Any] = TFBertModel(lowercase_ , name='text_model' )
        return vision_model, text_model
    def UpperCAmelCase_ ( self : str ) -> int:
        # Assemble the combined config/inputs dict from the two sub-testers.
        UpperCAmelCase : str = TFCLIPVisionModelTester(self )
        UpperCAmelCase : int = TFBertModelTester(self )
        UpperCAmelCase : Optional[int] = clip_model_tester.prepare_config_and_inputs()
        UpperCAmelCase : Tuple = bert_model_tester.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase : Tuple = vision_config_and_inputs
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) : Optional[int] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test against the ``clip-italian/clip-italian``
    checkpoint: verifies logit shapes and reference values.

    NOTE(review): ``lowercase_`` is passed as ``from_pt``/``images``/
    ``padding`` without ever being bound — mechanical renaming damage.
    """
    @slow
    def UpperCAmelCase_ ( self : Any ) -> Any:
        UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=lowercase_ )
        UpperCAmelCase : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        UpperCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        UpperCAmelCase : Union[str, Any] = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=lowercase_ , padding=lowercase_ , return_tensors='np' )
        UpperCAmelCase : int = model(**lowercase_ )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        UpperCAmelCase : List[Any] = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowercase_ , atol=1E-3 ) )
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
    """Project Euler 57: count, among the first ``UpperCAmelCase_``
    expansions of the continued fraction of sqrt(2), those whose numerator
    has more decimal digits than the denominator.

    Bug fixes / improvements: the previous body looped over the unbound
    name ``n`` (``NameError``) and accumulated a throwaway list just to
    take its length; a plain counter is used instead.
    """
    numerator, denominator = 1, 1
    count = 0
    for _ in range(UpperCAmelCase_ ):
        # Next convergent of sqrt(2): (p + 2q) / (p + q).
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            count += 1
    return count
if __name__ == "__main__":
    # Bug fix: the module defines ``UpperCamelCase``, not ``solution`` —
    # calling the old name raised ``NameError`` when run as a script.
    print(f'''{UpperCamelCase() = }''')
| 695 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
lowercase__ = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
lowercase__ = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class A_ ( _snake_case ):
    """Configuration for the Pix2Struct text (decoder) model.

    NOTE(review): ``__init__`` below declares every parameter as
    ``lowercase_`` (duplicate parameter names are a ``SyntaxError``) while
    the body reads ``vocab_size``, ``hidden_size`` etc. — mechanical
    renaming damage; restore the real parameter names from upstream.
    """
    UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
    UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
    # Map common attribute names onto this config's field names.
    UpperCAmelCase_ : Optional[int] = {
        """hidden_size""": """hidden_size""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
        UpperCAmelCase : Optional[Any] = vocab_size
        UpperCAmelCase : int = hidden_size
        UpperCAmelCase : List[Any] = d_kv
        UpperCAmelCase : Any = d_ff
        UpperCAmelCase : List[str] = num_layers
        UpperCAmelCase : str = num_heads
        UpperCAmelCase : List[Any] = relative_attention_num_buckets
        UpperCAmelCase : Tuple = relative_attention_max_distance
        UpperCAmelCase : str = dropout_rate
        UpperCAmelCase : Optional[int] = layer_norm_epsilon
        UpperCAmelCase : int = initializer_factor
        UpperCAmelCase : Union[str, Any] = use_cache
        UpperCAmelCase : List[Any] = eos_token_id
        UpperCAmelCase : Union[str, Any] = decoder_start_token_id
        # for backwards compatibility
        UpperCAmelCase : List[str] = dense_act_fn
        super().__init__(
            pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
    @classmethod
    def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
        """Load this config from a checkpoint; unwraps the ``text_config``
        sub-dict when given a composite ``pix2struct`` config."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            UpperCAmelCase : Any = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Configuration for the Pix2Struct vision (image encoder) model.

    NOTE(review): ``__init__`` below declares every parameter as
    ``lowercase_`` (duplicate parameter names are a ``SyntaxError``) while
    the body reads ``hidden_size``, ``d_ff`` etc. — mechanical renaming
    damage; restore the real parameter names from upstream.
    """
    UpperCAmelCase_ : int = """pix2struct_vision_model"""
    def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
        super().__init__(**lowercase_ )
        UpperCAmelCase : Any = hidden_size
        UpperCAmelCase : Any = patch_embed_hidden_size
        UpperCAmelCase : Optional[int] = d_ff
        UpperCAmelCase : Dict = dropout_rate
        UpperCAmelCase : Dict = num_hidden_layers
        UpperCAmelCase : List[Any] = num_attention_heads
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : str = initializer_factor
        UpperCAmelCase : str = attention_dropout
        UpperCAmelCase : str = layer_norm_eps
        UpperCAmelCase : Union[str, Any] = dense_act_fn
        UpperCAmelCase : Dict = seq_len
        UpperCAmelCase : Optional[int] = relative_attention_num_buckets
        UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
        UpperCAmelCase : str = d_kv
    @classmethod
    def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
        """Load this config from a checkpoint; unwraps the ``vision_config``
        sub-dict when given a composite ``pix2struct`` config."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            UpperCAmelCase : List[str] = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Composite Pix2Struct configuration combining text and vision configs.

    NOTE(review): mechanical renaming damage — ``__init__`` reads names
    (``text_config``, ``vision_config``, ``initializer_factor`` ...) whose
    parameters were renamed away, and ``self.initializer_range`` is read
    twice in a row where upstream propagates it to the two sub-configs.
    """
    UpperCAmelCase_ : Optional[int] = """pix2struct"""
    UpperCAmelCase_ : Dict = True
    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
        super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
        if text_config is None:
            UpperCAmelCase : Optional[int] = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            UpperCAmelCase : List[str] = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
        UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
        # Mirror the text config's special token ids at the top level.
        UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
        UpperCAmelCase : str = self.text_config.pad_token_id
        UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
        UpperCAmelCase : Union[str, Any] = initializer_factor
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : int = self.initializer_range
        UpperCAmelCase : int = self.initializer_range
        UpperCAmelCase : str = is_vqa
    @classmethod
    def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
        """Alternate constructor: build the composite config from the two sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
    def UpperCAmelCase_ ( self : Any ) -> Tuple:
        """Serialize to a plain dict, expanding both sub-configs."""
        UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
        UpperCAmelCase : Optional[int] = self.text_config.to_dict()
        UpperCAmelCase : Dict = self.vision_config.to_dict()
        UpperCAmelCase : Optional[Any] = self.__class__.model_type
        return output
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
    """Project Euler 16: sum of the decimal digits of ``2 ** UpperCAmelCase_``.

    Bug fix: the previous body read the unbound names ``power``, ``n`` and
    ``r`` (an immediate ``NameError``); reimplemented with the idiomatic
    string-based digit sum.
    """
    return sum(int(digit ) for digit in str(2 ** UpperCAmelCase_ ) )
if __name__ == "__main__":
    # Bug fix: the function in this module is named ``UpperCamelCase``,
    # not ``solution`` — the old name raised ``NameError`` at runtime.
    print(UpperCamelCase(int(str(input()).strip())))
| 695 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A_ ( unittest.TestCase ):
    """Pipeline tests for the ``audio-classification`` task.

    NOTE(review): some method signatures below repeat the parameter name
    ``lowercase_`` (a ``SyntaxError``) and bodies reference names that are
    never bound — mechanical renaming damage; restore from upstream.
    """
    UpperCAmelCase_ : List[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    UpperCAmelCase_ : Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def UpperCAmelCase_ ( self : Dict , lowercase_ : List[str] , lowercase_ : str , lowercase_ : str ) -> List[Any]:
        # Build the pipeline plus two raw zero waveforms of different lengths.
        UpperCAmelCase : Dict = AudioClassificationPipeline(model=lowercase_ , feature_extractor=lowercase_ )
        # test with a raw waveform
        UpperCAmelCase : str = np.zeros((34_000,) )
        UpperCAmelCase : List[Any] = np.zeros((14_000,) )
        return audio_classifier, [audioa, audio]
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Any ) -> List[str]:
        # Generic shape checks: default top-k and top_k=1 result structure.
        UpperCAmelCase , UpperCAmelCase : Optional[Any] = examples
        UpperCAmelCase : Optional[Any] = audio_classifier(lowercase_ )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            lowercase_ , [
                {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )},
                {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )},
            ] , )
        UpperCAmelCase : Any = audio_classifier(lowercase_ , top_k=1 )
        self.assertEqual(
            lowercase_ , [
                {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )},
            ] , )
        self.run_torchaudio(lowercase_ )
    @require_torchaudio
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : str ) -> Any:
        import datasets
        # test with a local file
        UpperCAmelCase : List[str] = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        UpperCAmelCase : List[str] = dataset[0]['audio']['array']
        UpperCAmelCase : List[str] = audio_classifier(lowercase_ )
        self.assertEqual(
            lowercase_ , [
                {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )},
                {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )},
            ] , )
    @require_torch
    def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
        # Small random classifier: accept either of two known score orderings.
        UpperCAmelCase : int = 'anton-l/wav2vec2-random-tiny-classifier'
        UpperCAmelCase : Tuple = pipeline('audio-classification' , model=lowercase_ )
        UpperCAmelCase : Optional[Any] = np.ones((8_000,) )
        UpperCAmelCase : str = audio_classifier(lowercase_ , top_k=4 )
        UpperCAmelCase : List[str] = [
            {'score': 0.0842, 'label': 'no'},
            {'score': 0.0838, 'label': 'up'},
            {'score': 0.0837, 'label': 'go'},
            {'score': 0.0834, 'label': 'right'},
        ]
        UpperCAmelCase : Union[str, Any] = [
            {'score': 0.0845, 'label': 'stop'},
            {'score': 0.0844, 'label': 'on'},
            {'score': 0.0841, 'label': 'right'},
            {'score': 0.0834, 'label': 'left'},
        ]
        self.assertIn(nested_simplify(lowercase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        UpperCAmelCase : Any = {'array': np.ones((8_000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
        UpperCAmelCase : str = audio_classifier(lowercase_ , top_k=4 )
        self.assertIn(nested_simplify(lowercase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
    @require_torch
    @slow
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        import datasets
        # Slow: check reference scores for the superb keyword-spotting model.
        UpperCAmelCase : Tuple = 'superb/wav2vec2-base-superb-ks'
        UpperCAmelCase : Any = pipeline('audio-classification' , model=lowercase_ )
        UpperCAmelCase : List[Any] = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' )
        UpperCAmelCase : int = np.array(dataset[3]['speech'] , dtype=np.floataa )
        UpperCAmelCase : List[str] = audio_classifier(lowercase_ , top_k=4 )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=3 ) , [
                {'score': 0.981, 'label': 'go'},
                {'score': 0.007, 'label': 'up'},
                {'score': 0.006, 'label': '_unknown_'},
                {'score': 0.001, 'label': 'down'},
            ] , )
    @require_tf
    @unittest.skip('Audio classification is not implemented for TF' )
    def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
        pass
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger.
lowercase__ = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
lowercase__ = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration for the BLIP-2 vision encoder.

    NOTE(review): ``__init__`` below declares every parameter as
    ``lowercase_`` (duplicate parameter names are a ``SyntaxError``) while
    the body reads ``hidden_size``, ``patch_size`` etc. — mechanical
    renaming damage; restore the real parameter names from upstream.
    """
    UpperCAmelCase_ : str = """blip_2_vision_model"""
    def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        super().__init__(**lowercase_ )
        UpperCAmelCase : Optional[int] = hidden_size
        UpperCAmelCase : List[str] = intermediate_size
        UpperCAmelCase : List[Any] = num_hidden_layers
        UpperCAmelCase : Any = num_attention_heads
        UpperCAmelCase : str = patch_size
        UpperCAmelCase : Union[str, Any] = image_size
        UpperCAmelCase : List[Any] = initializer_range
        UpperCAmelCase : str = attention_dropout
        UpperCAmelCase : str = layer_norm_eps
        UpperCAmelCase : List[str] = hidden_act
        UpperCAmelCase : str = qkv_bias
    @classmethod
    def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
        """Load this config from a checkpoint; unwraps the ``vision_config``
        sub-dict when given a composite ``blip-2`` config."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            UpperCAmelCase : Dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Configuration for the BLIP-2 Q-Former (the querying transformer that
    bridges the vision encoder and the language model).

    Defaults correspond to the released BLIP-2 checkpoints. Unknown keyword
    arguments are forwarded to the base config class.
    """

    # Must be named ``model_type``: the classmethod ``UpperCAmelCase_`` below
    # previously shadowed a class attribute of the same name, so its
    # ``cls.model_type`` comparison could never see this value.
    model_type: str = """blip_2_qformer"""

    def __init__(
        self,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        cross_attention_frequency: int = 2,
        encoder_hidden_size: int = 1_408,
        **kwargs,
    ) -> None:
        """Store the Q-Former hyper-parameters on the instance.

        The previous version declared every parameter as ``lowercase_``
        (a duplicate-argument SyntaxError) and bound the values to local
        throwaway names, leaving the instance without any attributes.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # how often (in layers) the Q-Former cross-attends to the image features
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def UpperCAmelCase_ ( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> "PretrainedConfig":
        """Load this config from a checkpoint path or hub id.

        When the checkpoint holds a full composite ``blip-2`` config, the
        nested ``qformer_config`` sub-dict is extracted before instantiation.
        """
        cls._set_token_in_kwargs(kwargs)

        # ``get_config_dict`` returns (config_dict, remaining_kwargs); the old
        # code assigned both halves to the same throwaway name.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']

        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict, **kwargs)
class A_ ( _snake_case ):
    """Composite BLIP-2 configuration tying together the vision encoder,
    the Q-Former and the language-model sub-configs.

    Each sub-config may be passed as a plain dict; missing ones fall back to
    the corresponding default config (the text model defaults to OPT).
    """

    # The two class attributes below were both obfuscated to the same name and
    # shadowed by the methods, making them unreachable; restoring their real
    # names breaks no caller and re-enables the base-class composition logic.
    model_type: str = """blip-2"""
    is_composition: bool = True

    def __init__(
        self,
        vision_config=None,
        qformer_config=None,
        text_config=None,
        num_query_tokens: int = 32,
        **kwargs,
    ) -> None:
        """Build the composite config from (optional) sub-config dicts.

        The previous version bound every value to local throwaway names
        instead of ``self.`` attributes, so the instance carried no state.
        """
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )

        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        # the text backbone is selected dynamically; OPT is the default
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # the Q-Former cross-attends to the vision tower, so its encoder width
        # must track the vision hidden size
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: "BlipaVisionConfig",
        qformer_config: "BlipaQFormerConfig",
        text_config: "PretrainedConfig",
        **kwargs,
    ) -> "A_":
        """Alternate constructor from already-instantiated sub-configs.

        This classmethod was previously named identically to the instance
        method below and therefore shadowed/unreachable; renaming it breaks
        no caller and makes it usable again.
        """
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def UpperCAmelCase_ ( self: Optional[Any] ) -> dict:
        """Serialize this config (and nested sub-configs) to a plain dict.

        NOTE(review): this implements ``to_dict`` semantics — consider
        renaming it to ``to_dict`` so the base class serialization picks up
        the nested sub-config dicts; kept as-is to preserve the reachable
        interface.
        """
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
if len(UpperCAmelCase_ ) <= 1:
return lst
UpperCAmelCase : List[str] = 1
while i < len(UpperCAmelCase_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCAmelCase : List[str] = 1
return lst
if __name__ == "__main__":
lowercase__ = input("Enter numbers separated by a comma:\n").strip()
lowercase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=8 ):
UpperCAmelCase : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase : int = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : UNetaDConditionModel , lowercase_ : DDPMScheduler , lowercase_ : VQModel , ) -> str:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> int:
if latents is None:
UpperCAmelCase : Union[str, Any] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase : Optional[int] = latents.to(lowercase_ )
UpperCAmelCase : List[Any] = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : int=0 ) -> Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : List[Any]=0 ) -> Optional[int]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self : Union[str, Any] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 100 , lowercase_ : float = 4.0 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> int:
UpperCAmelCase : List[str] = self._execution_device
UpperCAmelCase : int = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Dict = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Tuple = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase : str = self.scheduler.timesteps
UpperCAmelCase : Tuple = self.unet.config.in_channels
UpperCAmelCase , UpperCAmelCase : Tuple = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = {'image_embeds': image_embeds}
UpperCAmelCase : str = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase : str = variance_pred.chunk(2 )
UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Any = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase : int = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase : Optional[Any] = image * 0.5 + 0.5
UpperCAmelCase : str = image.clamp(0 , 1 )
UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('google/mt5-small' )
UpperCAmelCase : Optional[Any] = tokenizer('Hello there' , return_tensors='tf' ).input_ids
UpperCAmelCase : List[Any] = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
UpperCAmelCase : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
UpperCAmelCase : Dict = -tf.math.reduce_mean(lowercase_ ).numpy()
UpperCAmelCase : Any = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 695 |
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baaencode(string.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baadecode(UpperCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 50 ):
UpperCAmelCase : Optional[int] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if dataset.ndim != value_array.ndim:
UpperCAmelCase : str = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(UpperCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(UpperCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase : List[str] = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(UpperCAmelCase_ )
UpperCAmelCase : str = []
for value in value_array:
UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
UpperCAmelCase : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
if dist > temp_dist:
UpperCAmelCase : List[str] = temp_dist
UpperCAmelCase : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
lowercase__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
lowercase__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> 
print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
lowercase__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Any ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , lowercase_ : Any=False , lowercase_ : Union[str, Any]=False , lowercase_ : str=False , ) -> int:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCAmelCase : Optional[int] = np.array([re.sub(lowercase_ , '' , lowercase_ ) for x in predictions] )
UpperCAmelCase : Tuple = np.array([re.sub(lowercase_ , '' , lowercase_ ) for x in references] )
else:
UpperCAmelCase : str = np.asarray(lowercase_ )
UpperCAmelCase : List[str] = np.asarray(lowercase_ )
if ignore_case:
UpperCAmelCase : Dict = np.char.lower(lowercase_ )
UpperCAmelCase : str = np.char.lower(lowercase_ )
if ignore_punctuation:
UpperCAmelCase : Tuple = string.punctuation.maketrans('' , '' , string.punctuation )
UpperCAmelCase : Union[str, Any] = np.char.translate(lowercase_ , table=lowercase_ )
UpperCAmelCase : Optional[int] = np.char.translate(lowercase_ , table=lowercase_ )
if ignore_numbers:
UpperCAmelCase : Union[str, Any] = string.digits.maketrans('' , '' , string.digits )
UpperCAmelCase : Tuple = np.char.translate(lowercase_ , table=lowercase_ )
UpperCAmelCase : Any = np.char.translate(lowercase_ , table=lowercase_ )
UpperCAmelCase : Union[str, Any] = predictions == references
return {"exact_match": np.mean(lowercase_ ) * 100}
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
    """Tests for ``AutoFeatureExtractor`` lookup, registration and remote-code loading.

    NOTE(review): this class shows automated-renaming damage that is left untouched
    here: (1) many call sites pass the bare name ``lowercase_``, which is never
    defined in this module and will raise ``NameError`` at run time; (2) every test
    method shares the name ``UpperCAmelCase_``, so later definitions shadow earlier
    ones and only the last method actually exists on the class; (3) method names no
    longer start with ``test``, so unittest will not collect them.  Restore the
    upstream names (``test_...`` methods, fixture-path constants) before relying on
    this suite.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        # Presumably a setUp-style counter reset — TODO confirm against upstream.
        UpperCAmelCase : Optional[Any] = 0

    def UpperCAmelCase_ ( self : List[Any] ) -> Any:
        # Resolve a feature extractor from a model-hub shortcut name.
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
        # Resolve a feature extractor from a local path (argument is the undefined
        # placeholder ``lowercase_`` — presumably a fixture-dir constant).
        UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
        # Save a config + feature-extractor pair into a temp dir and reload via Auto API.
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase : Any = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
            config_dict.pop('feature_extractor_type' )
            UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
            # save in new folder
            model_config.save_pretrained(lowercase_ )
            config.save_pretrained(lowercase_ )
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
            # make sure private variable is not incorrectly saved
            UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Resolve a feature extractor directly from a config json file.
        UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        # An invalid identifier must produce a clear error message.
        with self.assertRaisesRegex(
            lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        # An invalid revision must produce a clear error message.
        with self.assertRaisesRegex(
            lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # A repo without a preprocessor_config.json must produce a clear error message.
        with self.assertRaisesRegex(
            lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )

    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowercase_ )
            UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        # Register a custom config/extractor pair, then clean the registries in finally.
        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase_ ):
                AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowercase_ )
                UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
                self.assertIsInstance(lowercase_ , lowercase_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def UpperCAmelCase_ ( self : int ) -> Tuple:
        # A locally registered extractor takes precedence over remote code unless
        # trust_remote_code=True is passed.
        class A_ ( _snake_case ):
            """Local stand-in extractor; the flag presumably marks it as local."""
            UpperCAmelCase_ : Union[str, Any] = True

        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # If remote code is not set, the default is to use local
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ ( unittest.TestCase ):
    """Round-trip and interface tests for ``BlipProcessor`` (image processor + tokenizer).

    NOTE(review): automated-renaming damage is left untouched here: several call
    sites pass the bare name ``lowercase_``, which is never defined in this module
    (``NameError`` at run time, e.g. at ``BlipProcessor(lowercase_ , lowercase_ )``),
    ``np.uinta`` is not a real numpy dtype name, and every method shares the name
    ``UpperCAmelCase_`` so only the last definition survives.  Restore upstream
    names before relying on this suite.
    """

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
        # setUp-equivalent: build a processor and save it into a temp dir.
        UpperCAmelCase : Tuple = tempfile.mkdtemp()
        UpperCAmelCase : List[Any] = BlipImageProcessor()
        UpperCAmelCase : str = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
        UpperCAmelCase : Optional[Any] = BlipProcessor(lowercase_ , lowercase_ )
        processor.save_pretrained(self.tmpdirname )

    def UpperCAmelCase_ ( self : List[Any] , **lowercase_ : int ) -> int:
        # Helper: reload the tokenizer from the temp dir with optional overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).tokenizer

    def UpperCAmelCase_ ( self : int , **lowercase_ : Union[str, Any] ) -> Union[str, Any]:
        # Helper: reload the image processor from the temp dir with optional overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
        # tearDown-equivalent: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        # Produce one random PIL image (moveaxis converts CHW -> HWC).
        UpperCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCAmelCase : str = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
        # Saving then reloading with kwargs must apply the overrides to both components.
        UpperCAmelCase : List[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        UpperCAmelCase : str = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
        UpperCAmelCase : List[str] = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowercase_ )

    def UpperCAmelCase_ ( self : str ) -> Any:
        # Processor image output must match the bare image processor's output.
        UpperCAmelCase : List[Any] = self.get_image_processor()
        UpperCAmelCase : List[str] = self.get_tokenizer()
        UpperCAmelCase : int = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        UpperCAmelCase : str = self.prepare_image_inputs()
        UpperCAmelCase : Dict = image_processor(lowercase_ , return_tensors='np' )
        UpperCAmelCase : Optional[Any] = processor(images=lowercase_ , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
        # Processor text output must match the bare tokenizer's output.
        UpperCAmelCase : Optional[Any] = self.get_image_processor()
        UpperCAmelCase : List[Any] = self.get_tokenizer()
        UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        UpperCAmelCase : Tuple = 'lower newer'
        UpperCAmelCase : Tuple = processor(text=lowercase_ )
        UpperCAmelCase : List[Any] = tokenizer(lowercase_ , return_token_type_ids=lowercase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCAmelCase_ ( self : int ) -> List[Any]:
        # Combined text+image call returns the expected keys; empty call must raise.
        UpperCAmelCase : Union[str, Any] = self.get_image_processor()
        UpperCAmelCase : Optional[int] = self.get_tokenizer()
        UpperCAmelCase : str = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        UpperCAmelCase : List[str] = 'lower newer'
        UpperCAmelCase : Any = self.prepare_image_inputs()
        UpperCAmelCase : List[Any] = processor(text=lowercase_ , images=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
        # batch_decode must delegate to the tokenizer's batch_decode.
        UpperCAmelCase : str = self.get_image_processor()
        UpperCAmelCase : Any = self.get_tokenizer()
        UpperCAmelCase : Optional[Any] = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        UpperCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCAmelCase : List[str] = processor.batch_decode(lowercase_ )
        UpperCAmelCase : str = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        # Model-input names: the processor currently exposes exactly these three keys.
        UpperCAmelCase : Union[str, Any] = self.get_image_processor()
        UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
        UpperCAmelCase : Dict = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        UpperCAmelCase : Optional[int] = 'lower newer'
        UpperCAmelCase : List[str] = self.prepare_image_inputs()
        UpperCAmelCase : Optional[Any] = processor(text=lowercase_ , images=lowercase_ )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ , timeout=10.0 ):
    """Download an Instagram video/IGTV post and return its raw bytes.

    Args:
        UpperCAmelCase_: public URL of the Instagram video/IGTV post.
        timeout: per-request timeout in seconds.  ``requests`` has no default
            timeout, so without one a stalled server would hang forever.

    Returns:
        The video file contents as ``bytes``.

    Raises:
        requests.HTTPError: if either HTTP request returns an error status.
    """
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    # First request: ask the downloadgram API for the direct .mp4 source URL.
    # (The original referenced undefined names `base_url`/`url` — NameError.)
    api_response = requests.get(base_url + UpperCAmelCase_ , timeout=timeout )
    api_response.raise_for_status()
    video_url = api_response.json()[0]['urls'][0]['src']
    # Second request: fetch the actual video bytes.
    video_response = requests.get(video_url , timeout=timeout )
    video_response.raise_for_status()
    return video_response.content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    # Use '-' instead of ':' in the timestamp: ':' is not a valid filename
    # character on Windows, so the original pattern failed there.
    file_name = f"{datetime.now():%Y-%m-%d_%H-%M-%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(UpperCamelCase(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 695 | 1 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS-84 reference ellipsoid parameters, in metres.  The original collapsed all
# three constants into one rebound name (`lowercase__`), leaving the names used
# by the function below undefined.
AXIS_A = 6378137.0       # semi-major axis
AXIS_B = 6356752.314245  # semi-minor axis
RADIUS = 6378137         # radius used for the final arc length


def UpperCamelCase( lat1 , lon1 , lat2 , lon2 ):
    """Return the haversine distance in metres between two (lat, lon) points.

    Latitudes are first converted to reduced (parametric) latitudes to correct
    for the Earth's flattening, then the classic haversine formula is applied —
    a better approximation than a perfect-sphere model.

    Args:
        lat1, lon1: first point, in decimal degrees.
        lat2, lon2: second point, in decimal degrees.

    Returns:
        Great-circle distance in metres.

    >>> round(UpperCamelCase(0.0, 0.0, 0.0, 0.0), 3)
    0.0
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced latitudes (flattening-corrected).
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation (the original computed `phi_a - phi_a`, i.e. always 0,
    # because distinct variables had been collapsed into one name).
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
    """Project Euler 94: sum of perimeters of almost-equilateral Heronian triangles.

    An almost-equilateral triangle has integral sides (a, a, a±1) and integral
    area.  Their sides obey a Pell-like recurrence, so the qualifying perimeters
    (16, 50, 196, 722, ...) are generated directly instead of searched for.
    (The original collapsed all five state variables into one name, leaving the
    loop referencing undefined identifiers.)

    Args:
        UpperCAmelCase_: inclusive upper bound on the perimeter (default 10**9).

    Returns:
        The sum of all qualifying perimeters not exceeding the bound.
    """
    max_perimeter = UpperCAmelCase_
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the Pell-like recurrence that yields the next triangle.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    # The original printed `solution()`, a name that does not exist here.
    print(f"{UpperCamelCase() = }")
| 695 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase__ = logging.get_logger(__name__)
# Module-level tokenizer constants.  The original collapsed every assignment into
# one rebound name (`lowercase__`), so the names the tokenizer class references
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) were undefined at import time.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Locations of the SentencePiece vocab file for each pretrained shortcut.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

# XLNet uses relative attention, so there is no hard positional-embedding limit.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class A_ ( _snake_case ):
    """XLNet tokenizer backed by a SentencePiece model (``spiece.model``).

    Pads on the left (XLNet convention).  Reconstructed from the upstream
    ``XLNetTokenizer``: the original had every parameter in several signatures
    named ``lowercase_`` (duplicate parameter names — a SyntaxError) and every
    method named ``UpperCAmelCase_`` (so base-class hooks such as ``_tokenize``
    were never actually overridden); the upstream names are restored here.
    """

    # Class attributes read by the PreTrainedTokenizer base class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        # XLNet uses segment id 3 for padding (see SEG_ID_PAD above).
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Vocabulary size equals the SentencePiece model size.
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # SentencePiece processors are not picklable: drop and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Apply configured normalisation: whitespace, quotes, accents, case."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece; re-split pieces that end in digit+comma."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and map the SentencePiece underline back to spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet format: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: sequence A -> 0, sequence B -> 1, trailing <cls> -> 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    # Backward-compatibility alias: in the obfuscated original, repeated
    # redefinition meant the name ``UpperCAmelCase_`` ended up bound to the
    # last method (save_vocabulary).
    UpperCAmelCase_ = save_vocabulary
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast CPU tests for the unconditional latent-diffusion (LDM) pipeline.

    Restored from the upstream diffusers test: the obfuscated original referenced
    an undefined name ``lowercase_`` (NameError) and defined every method under
    one shared name, so only the last definition survived and unittest never
    collected any test.
    """

    @property
    def dummy_uncond_unet(self):
        # Tiny deterministic UNet so the test runs quickly on CPU.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    @property
    def dummy_vq_model(self):
        # Tiny deterministic VQ-VAE decoder.
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        # Tiny CLIP text encoder (unused by the unconditional test below but part
        # of the fixture set).
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        """Dict and tuple outputs of the pipeline must match a known slice."""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        # Re-seed so both calls see identical noise.
        generator = torch.manual_seed(0)
        # return_dict=False yields a plain tuple; index 0 is the images array.
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # mps produces slightly different numerics, hence the looser tolerance.
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test for the pretrained CelebA-HQ LDM pipeline.

    Restored from upstream: the original referenced the undefined name
    ``lowercase_`` where ``torch_device`` / ``disable=None`` / the generator
    were intended.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447])
        # mps produces slightly different numerics, hence the looser tolerance.
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Args:
        UpperCAmelCase_: inclusive upper index of the sequence (must be >= 0).

    Returns:
        List of the first ``UpperCAmelCase_ + 1`` Catalan numbers.

    Raises:
        ValueError: if the limit is negative.
    """
    # The original body referenced `upper_limit` while the parameter had a
    # different name, and its inner loop iterated `range(UpperCAmelCase_)`
    # instead of `range(i)` — both fixed here.
    upper_limit = UpperCAmelCase_
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    # Interactive driver: read upper limits until a negative number or bad input.
    # The original read input into `lowercase__` but then tested the undefined
    # name `N`, and called the non-existent `catalan_numbers` — both fixed.
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(UpperCamelCase(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")
    import doctest

    doctest.testmod()
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast CPU test for the Karras-VE pipeline with a tiny dummy UNet.

    Restored from upstream: the original referenced the undefined placeholder
    ``lowercase_`` and both methods shared one name, so the test never ran.
    """

    @property
    def dummy_uncond_unet(self):
        # Tiny deterministic UNet so the test runs quickly on CPU.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        """Dict and tuple outputs of the pipeline must match a known slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
        # Re-seed so both calls see identical noise.
        generator = torch.manual_seed(0)
        # return_dict=False yields a plain tuple; index 0 is the images array.
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: Karras-VE with the pretrained NCSN++ CelebA-HQ UNet.

    Restored from upstream: ``lowercase_`` placeholders replaced with the
    locally defined ``model_id``/``torch_device``/``generator`` values.
    """

    def test_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ = logging.get_logger(__name__)
enable_full_determinism()
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
    """Model-tester configuration for a small 3-channel UNet2D.

    Restored from upstream: the class attributes and property names were all
    collapsed to one identifier, yet ``prepare_init_args_and_inputs_for_common``
    below reads ``self.dummy_input`` — so the original could never resolve its
    own fixtures.  ``model_class``/``main_input_name`` are the attribute names
    the tester mixins read (confirm against the mixin definitions).
    """

    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (model init kwargs, forward-pass inputs) for the common tests."""
        init_dict = {
            'block_out_channels': (32, 64),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
    """Unit tests for ``UNetaDModel`` (a UNet2D-style diffusion model).

    NOTE(review): this file appears machine-obfuscated.  Every method is named
    ``UpperCAmelCase_`` (so later defs shadow earlier ones on the class), most
    locals are bound to the single reused name ``UpperCAmelCase``, and later
    lines read identifiers (``batch_size``, ``noise``, ``model``, ``loading_info``
    ...) that are never defined here -- as written these tests would raise
    NameError at runtime.  The comments below describe the evident intent;
    confirm against the original diffusers test file.
    """
    # Model class under test / primary output key.  Both class attributes share
    # the mangled name, so the second assignment shadows the first.
    UpperCAmelCase_ : Dict = UNetaDModel
    UpperCAmelCase_ : List[Any] = """sample"""
    @property
    def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
        # Dummy model input: random (batch=4, channels=4, 32, 32) sample plus
        # a single timestep tensor, both moved to the test device.
        UpperCAmelCase : Dict = 4
        UpperCAmelCase : int = 4
        UpperCAmelCase : Any = (32, 32)
        UpperCAmelCase : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ )
        UpperCAmelCase : Optional[Any] = torch.tensor([10] ).to(lowercase_ )
        return {"sample": noise, "timestep": time_step}
    @property
    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # Expected input shape (channels, H, W).
        return (4, 32, 32)
    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
        # Expected output shape (channels, H, W).
        return (4, 32, 32)
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
        # Minimal UNet2D config plus matching dummy inputs for the shared
        # model-tester checks.
        UpperCAmelCase : Tuple = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        UpperCAmelCase : List[Any] = self.dummy_input
        return init_dict, inputs_dict
    def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
        # from_pretrained must report no missing keys and run a forward pass.
        UpperCAmelCase , UpperCAmelCase : Tuple = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=lowercase_ )
        self.assertIsNotNone(lowercase_ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(lowercase_ )
        UpperCAmelCase : Any = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        # Same smoke test, restricted to GPU.
        UpperCAmelCase , UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=lowercase_ )
        model.to(lowercase_ )
        UpperCAmelCase : Dict = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
        # By default, model loading uses accelerate (`low_cpu_mem_usage=True`);
        # check that it matches a normal (non-accelerate) load numerically.
        UpperCAmelCase , UpperCAmelCase : str = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=lowercase_ )
        model_accelerate.to(lowercase_ )
        model_accelerate.eval()
        UpperCAmelCase : Any = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCAmelCase : List[str] = noise.to(lowercase_ )
        UpperCAmelCase : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(lowercase_ )
        UpperCAmelCase : int = model_accelerate(lowercase_ , lowercase_ )['sample']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        UpperCAmelCase , UpperCAmelCase : Dict = UNetaDModel.from_pretrained(
            'fusing/unet-ldm-dummy-update' , output_loading_info=lowercase_ , low_cpu_mem_usage=lowercase_ )
        model_normal_load.to(lowercase_ )
        model_normal_load.eval()
        UpperCAmelCase : str = model_normal_load(lowercase_ , lowercase_ )['sample']
        assert torch_all_close(lowercase_ , lowercase_ , rtol=1E-3 )
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
        # Deterministic forward pass checked against a hard-coded output slice.
        UpperCAmelCase : int = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
        model.eval()
        model.to(lowercase_ )
        UpperCAmelCase : int = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCAmelCase : List[str] = noise.to(lowercase_ )
        UpperCAmelCase : str = torch.tensor([10] * noise.shape[0] ).to(lowercase_ )
        with torch.no_grad():
            UpperCAmelCase : Tuple = model(lowercase_ , lowercase_ ).sample
        UpperCAmelCase : Any = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        UpperCAmelCase : Any = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-3 ) )
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
    """Tests for the score-based (NCSN++/VE) configuration of ``UNetaDModel``.

    NOTE(review): same machine-mangling as the class above -- every method is
    named ``UpperCAmelCase_``, locals are bound to the reused ``UpperCAmelCase``
    and then read back under original names (``batch_size``, ``noise``,
    ``model`` ...) that are never defined, so these tests would raise
    NameError as written.
    """
    # Model class under test; the second assignment shadows the first.
    UpperCAmelCase_ : str = UNetaDModel
    UpperCAmelCase_ : Tuple = """sample"""
    @property
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : str=(32, 32) ) -> Optional[Any]:
        # NOTE(review): a property cannot take an extra argument; this was
        # presumably a ``dummy_input(sizes=(32, 32))`` helper -- TODO confirm.
        # Builds a random (4, 3, H, W) sample plus int timesteps.
        UpperCAmelCase : Union[str, Any] = 4
        UpperCAmelCase : Optional[int] = 3
        UpperCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ )
        UpperCAmelCase : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=lowercase_ )
        return {"sample": noise, "timestep": time_step}
    @property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
        # Expected input shape (channels, H, W).
        return (3, 32, 32)
    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Expected output shape (channels, H, W).
        return (3, 32, 32)
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        # NCSN++-style config: Fourier time embedding, skip up/down blocks,
        # no group norm, mid-block scaled by sqrt(2).
        UpperCAmelCase : str = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1E-6,
            'mid_block_scale_factor': math.sqrt(2.0 ),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        UpperCAmelCase : Dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def UpperCAmelCase_ ( self : Tuple ) -> Dict:
        # Load the real 256x256 celebahq checkpoint and smoke-test a forward pass.
        UpperCAmelCase , UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=lowercase_ )
        self.assertIsNotNone(lowercase_ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(lowercase_ )
        UpperCAmelCase : Tuple = self.dummy_input
        UpperCAmelCase : Tuple = floats_tensor((4, 3) + (256, 256) ).to(lowercase_ )
        UpperCAmelCase : Dict = noise
        UpperCAmelCase : Any = model(**lowercase_ )
        assert image is not None, "Make sure output is not None"
    @slow
    def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
        # 256x256 output checked against a hard-coded slice of expected values.
        UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
        model.to(lowercase_ )
        UpperCAmelCase : int = 4
        UpperCAmelCase : str = 3
        UpperCAmelCase : Any = (256, 256)
        UpperCAmelCase : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(lowercase_ )
        UpperCAmelCase : str = torch.tensor(batch_size * [1E-4] ).to(lowercase_ )
        with torch.no_grad():
            UpperCAmelCase : str = model(lowercase_ , lowercase_ ).sample
        UpperCAmelCase : Dict = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCAmelCase : Any = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-2 ) )
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        # Same slice check against the small dummy ffhq-ve checkpoint at 32x32.
        UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
        model.to(lowercase_ )
        UpperCAmelCase : List[Any] = 4
        UpperCAmelCase : List[str] = 3
        UpperCAmelCase : List[str] = (32, 32)
        UpperCAmelCase : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(lowercase_ )
        UpperCAmelCase : Optional[int] = torch.tensor(batch_size * [1E-4] ).to(lowercase_ )
        with torch.no_grad():
            UpperCAmelCase : Optional[int] = model(lowercase_ , lowercase_ ).sample
        UpperCAmelCase : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCAmelCase : Dict = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-2 ) )
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        # not required for this model
        pass
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration class for an Autoformer time-series model.

    Holds the time-series settings (prediction/context lengths, lag indices,
    static/dynamic feature counts), the encoder/decoder transformer
    hyper-parameters, and the Autoformer-specific decomposition settings
    (``label_length``, ``moving_average``, ``autocorrelation_factor``).

    NOTE(review): the original was machine-mangled -- all ``__init__``
    parameters shared the duplicate name ``lowercase_`` (a SyntaxError) and
    every value was bound to a throwaway local instead of an instance
    attribute, even though ``self.cardinality``, ``self.lags_sequence``,
    ``self.embedding_dimension`` and ``self._number_of_features`` are read
    further down.  The names below are restored from that evident intent.
    """

    model_type = "autoformer"
    # Map generic HF attribute names onto this config's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the context window to the prediction window when unset.
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: about half the category count, capped at 50.
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features(self) -> int:
        # Width of the per-time-step feature vector appended to lagged values.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 695 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
    """Fast tests for ``StableDiffusionPanoramaPipeline`` with tiny dummy components.

    NOTE(review): identifiers here are machine-mangled: every method shares the
    name ``UpperCAmelCase_`` (later defs shadow earlier ones on the class),
    locals are bound to the reused name ``UpperCAmelCase``, and later lines
    read original identifiers (``components``, ``sd_pipe``, ``image`` ...)
    that are never defined -- as written these tests would raise NameError.
    The comments below describe the evident intent of each test.
    """
    # Pipeline under test and the shared parameter sets; because the class
    # attributes all share one mangled name, only the last value survives.
    UpperCAmelCase_ : List[Any] = StableDiffusionPanoramaPipeline
    UpperCAmelCase_ : Optional[int] = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
    UpperCAmelCase_ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
    UpperCAmelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
        # Assemble tiny randomly-seeded components: UNet, DDIM scheduler, VAE,
        # and a CLIP text encoder/tokenizer; no safety checker for speed.
        torch.manual_seed(0 )
        UpperCAmelCase : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        UpperCAmelCase : Any = DDIMScheduler()
        torch.manual_seed(0 )
        UpperCAmelCase : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        UpperCAmelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        UpperCAmelCase : Union[str, Any] = CLIPTextModel(lowercase_ )
        UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        UpperCAmelCase : Dict = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def UpperCAmelCase_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]=0 ) -> Optional[Any]:
        # Deterministic call kwargs built around a seeded generator.
        UpperCAmelCase : Optional[Any] = torch.manual_seed(lowercase_ )
        UpperCAmelCase : Any = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def UpperCAmelCase_ ( self : List[Any] ) -> int:
        # Default run: check output shape and a hard-coded pixel slice.
        UpperCAmelCase : str = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase : int = self.get_dummy_components()
        UpperCAmelCase : str = StableDiffusionPanoramaPipeline(**lowercase_ )
        UpperCAmelCase : Optional[Any] = sd_pipe.to(lowercase_ )
        sd_pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : str = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase : str = sd_pipe(**lowercase_ ).images
        UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : Optional[int] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
        # Batch-consistency check from the shared pipeline-tester mixin.
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        # Single-vs-batched equivalence with a loose tolerance.
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        # Negative-prompt variant of the default run.
        UpperCAmelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase : int = self.get_dummy_components()
        UpperCAmelCase : Any = StableDiffusionPanoramaPipeline(**lowercase_ )
        UpperCAmelCase : Tuple = sd_pipe.to(lowercase_ )
        sd_pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : int = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase : str = 'french fries'
        UpperCAmelCase : Optional[int] = sd_pipe(**lowercase_ , negative_prompt=lowercase_ )
        UpperCAmelCase : Dict = output.images
        UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : Optional[int] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def UpperCAmelCase_ ( self : List[Any] ) -> str:
        # Multi-view batching (view_batch_size=2) must match the expected slice.
        UpperCAmelCase : List[str] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase : Dict = self.get_dummy_components()
        UpperCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline(**lowercase_ )
        UpperCAmelCase : List[Any] = sd_pipe.to(lowercase_ )
        sd_pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Tuple = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase : Dict = sd_pipe(**lowercase_ , view_batch_size=2 )
        UpperCAmelCase : Union[str, Any] = output.images
        UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : Union[str, Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
        # Swap in an Euler-ancestral scheduler and check the expected slice.
        UpperCAmelCase : Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase : str = self.get_dummy_components()
        UpperCAmelCase : Union[str, Any] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
        UpperCAmelCase : Any = StableDiffusionPanoramaPipeline(**lowercase_ )
        UpperCAmelCase : Tuple = sd_pipe.to(lowercase_ )
        sd_pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Optional[int] = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase : Any = sd_pipe(**lowercase_ ).images
        UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : List[Any] = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
        # Swap in a PNDM scheduler (PRK steps skipped) and check the slice.
        UpperCAmelCase : Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase : Tuple = self.get_dummy_components()
        UpperCAmelCase : Union[str, Any] = PNDMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=lowercase_ )
        UpperCAmelCase : int = StableDiffusionPanoramaPipeline(**lowercase_ )
        UpperCAmelCase : Optional[int] = sd_pipe.to(lowercase_ )
        sd_pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Dict = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase : Any = sd_pipe(**lowercase_ ).images
        UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : Any = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableDiffusionPanoramaPipeline``
    against the real ``stabilityai/stable-diffusion-2-base`` weights.

    NOTE(review): same machine-mangling as above -- methods are all named
    ``UpperCAmelCase_`` and locals bound to ``UpperCAmelCase`` are read back
    under original names that are never defined here.
    """
    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        # Presumably ``tearDown`` originally (frees GPU memory between tests);
        # under the mangled name unittest never calls it automatically.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Any=0 ) -> Tuple:
        # Standard call kwargs with a seeded generator (3 inference steps).
        UpperCAmelCase : Dict = torch.manual_seed(lowercase_ )
        UpperCAmelCase : Union[str, Any] = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def UpperCAmelCase_ ( self : Any ) -> Dict:
        # DDIM run: panorama output is 512x2048; compare a pixel slice.
        UpperCAmelCase : Dict = 'stabilityai/stable-diffusion-2-base'
        UpperCAmelCase : Dict = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
        UpperCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        pipe.enable_attention_slicing()
        UpperCAmelCase : Dict = self.get_inputs()
        UpperCAmelCase : Optional[int] = pipe(**lowercase_ ).images
        UpperCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        UpperCAmelCase : Dict = np.array(
            [
                0.3696_8392,
                0.2702_5372,
                0.3244_6766,
                0.2837_9387,
                0.3636_3274,
                0.3073_3347,
                0.2710_0027,
                0.2705_4125,
                0.2553_6096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        # LMS-scheduler run; expected slice is all zeros with a tight tolerance.
        UpperCAmelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base' , safety_checker=lowercase_ )
        UpperCAmelCase : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        pipe.enable_attention_slicing()
        UpperCAmelCase : Dict = self.get_inputs()
        UpperCAmelCase : str = pipe(**lowercase_ ).images
        UpperCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        UpperCAmelCase : List[str] = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
        # Verify the step callback fires once per step with latents of the
        # expected panorama shape (1, 4, 64, 256).
        UpperCAmelCase : Any = 0
        def callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : torch.FloatTensor ) -> None:
            # Flag that the callback ran, then spot-check intermediate latents.
            UpperCAmelCase : Union[str, Any] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                UpperCAmelCase : Tuple = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                UpperCAmelCase : Dict = latents[0, -3:, -3:, -1]
                UpperCAmelCase : int = np.array(
                    [
                        0.1868_1869,
                        0.3390_7816,
                        0.536_1276,
                        0.1443_2865,
                        -0.0285_6611,
                        -0.7394_1123,
                        0.2339_7987,
                        0.4732_2682,
                        -0.3782_3164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                UpperCAmelCase : Dict = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                UpperCAmelCase : Dict = latents[0, -3:, -3:, -1]
                UpperCAmelCase : List[Any] = np.array(
                    [
                        0.1853_9645,
                        0.3398_7248,
                        0.537_8559,
                        0.1443_7142,
                        -0.0245_5261,
                        -0.733_8317,
                        0.2399_0755,
                        0.4735_6272,
                        -0.378_6505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        UpperCAmelCase : List[Any] = False
        UpperCAmelCase : int = 'stabilityai/stable-diffusion-2-base'
        UpperCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
        UpperCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
        UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        pipe.enable_attention_slicing()
        UpperCAmelCase : Tuple = self.get_inputs()
        pipe(**lowercase_ , callback=lowercase_ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
        # Sequential CPU offload + attention slicing must keep peak GPU
        # memory under the asserted budget.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase : List[str] = 'stabilityai/stable-diffusion-2-base'
        UpperCAmelCase : Dict = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
        UpperCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
        UpperCAmelCase : Dict = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase : Optional[int] = self.get_inputs()
        UpperCAmelCase : List[Any] = pipe(**lowercase_ )
        UpperCAmelCase : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 695 |
'''simple docstring'''
def UpperCamelCase( sentence , ngram_size ):
    """Return all contiguous n-grams of ``sentence`` with length ``ngram_size``.

    Works on any sliceable sequence (str, list, ...).  Returns an empty list
    when the sequence is shorter than ``ngram_size``.

    NOTE(review): the mangled original declared two parameters with the same
    name (a SyntaxError); the body's single slice expression fixes the intent.

    >>> UpperCamelCase("abc", 2)
    ['ab', 'bc']
    >>> UpperCamelCase("", 2)
    []
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 695 | 1 |
'''simple docstring'''
# Lazy-import module init for the M2M-100 model family.
#
# Fixes (mangled original): the import-structure dict was bound to
# ``lowercase__`` while `_LazyModule` read the undefined name
# ``_import_structure``; the torch branch overwrote the dict with a bare
# list instead of adding a "modeling_m2m_100" entry; the lazy module was
# bound to a throwaway name instead of being installed in
# ``sys.modules[__name__]``; and the TYPE_CHECKING imports used module/class
# names inconsistent with the import structure.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names exported from it; consumed by _LazyModule.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply skip registering the modeling symbols.
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (RBJ Audio EQ Cookbook formulas).

    NOTE(review): the mangled original declared duplicate parameter names (a
    SyntaxError) and read undefined locals; names restored from the visible
    cookbook expressions.  All filter factories in this module share the
    mangled name ``UpperCamelCase``, so later defs shadow earlier ones.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def UpperCamelCase(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order high-pass biquad (RBJ Audio EQ Cookbook formulas).

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names, undefined locals); the coefficient expressions match the cookbook.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def UpperCamelCase(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (RBJ Audio EQ Cookbook formulas).

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names, undefined locals); the coefficient expressions match the cookbook.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def UpperCamelCase(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (RBJ Audio EQ Cookbook formulas).

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names, undefined locals).  For the all-pass case the feedback coefficients
    are the feedforward ones reversed.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def UpperCamelCase(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad (RBJ Audio EQ Cookbook formulas).

    ``gain_db`` is the boost/cut at the center frequency in decibels.

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names, undefined locals); the coefficient expressions match the cookbook.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude ratio from the dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def UpperCamelCase(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad (RBJ Audio EQ Cookbook formulas).

    ``gain_db`` is the shelf gain in decibels below the corner frequency.

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names, undefined locals); the intermediate products match the cookbook's
    (A±1) ± (A∓1)·cos terms.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude ratio from the dB gain
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def UpperCamelCase(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad (RBJ Audio EQ Cookbook formulas).

    ``gain_db`` is the shelf gain in decibels above the corner frequency.

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names, undefined locals); mirrors the low-shelf factory with the b/a
    roles of the (A±1) ± (A∓1)·cos terms exchanged, per the cookbook.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude ratio from the dB gain
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
| 695 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : List[str]=0.0 , lowercase_ : Optional[int] = None , lowercase_ : str = "geglu" , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : str = "layer_norm" , lowercase_ : bool = False , ) -> int:
super().__init__()
UpperCAmelCase : List[str] = only_cross_attention
UpperCAmelCase : str = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
UpperCAmelCase : str = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCAmelCase : List[Any] = AdaLayerNorm(lowercase_ , lowercase_ )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase : int = AdaLayerNormZero(lowercase_ , lowercase_ )
else:
UpperCAmelCase : str = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
UpperCAmelCase : int = Attention(
query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , dropout=lowercase_ , bias=lowercase_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=lowercase_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCAmelCase : str = (
AdaLayerNorm(lowercase_ , lowercase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
)
UpperCAmelCase : Tuple = Attention(
query_dim=lowercase_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=lowercase_ , dim_head=lowercase_ , dropout=lowercase_ , bias=lowercase_ , upcast_attention=lowercase_ , ) # is self-attn if encoder_hidden_states is none
else:
UpperCAmelCase : str = None
UpperCAmelCase : str = None
# 3. Feed-forward
UpperCAmelCase : List[str] = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
UpperCAmelCase : List[str] = FeedForward(lowercase_ , dropout=lowercase_ , activation_fn=lowercase_ , final_dropout=lowercase_ )
# let chunk size default to None
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Dict = 0
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int ) -> str:
# Sets chunk feed-forward
UpperCAmelCase : Dict = chunk_size
UpperCAmelCase : Union[str, Any] = dim
    # Forward pass of a transformer block: (adaptive) norm -> self-attention ->
    # optional cross-attention -> (optionally chunked) feed-forward, each with a
    # residual connection.
    # NOTE(review): locals are bound to the throwaway name `UpperCAmelCase` but then
    # read back as `attn_output`, `norm_hidden_states`, `ff_output`, etc., and the
    # signature repeats the parameter name `lowercase_` (SyntaxError) — as written
    # this method cannot run; restore the original bindings before use.
    def UpperCAmelCase_ ( self : List[str] , lowercase_ : torch.FloatTensor , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Dict[str, Any] = None , lowercase_ : Optional[torch.LongTensor] = None , ) -> List[Any]:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            UpperCAmelCase : Tuple = self.norma(lowercase_ , lowercase_ )
        elif self.use_ada_layer_norm_zero:
            # AdaLayerNormZero also returns the modulation chunks (gates/shifts/scales).
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.norma(
                lowercase_ , lowercase_ , lowercase_ , hidden_dtype=hidden_states.dtype )
        else:
            UpperCAmelCase : str = self.norma(lowercase_ )
        UpperCAmelCase : List[str] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        UpperCAmelCase : Dict = self.attna(
            lowercase_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=lowercase_ , **lowercase_ , )
        if self.use_ada_layer_norm_zero:
            # Gate the attention output before the residual addition.
            UpperCAmelCase : Optional[Any] = gate_msa.unsqueeze(1 ) * attn_output
        UpperCAmelCase : str = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attna is not None:
            UpperCAmelCase : str = (
                self.norma(lowercase_ , lowercase_ ) if self.use_ada_layer_norm else self.norma(lowercase_ )
            )
            UpperCAmelCase : Optional[Any] = self.attna(
                lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=lowercase_ , **lowercase_ , )
            UpperCAmelCase : List[Any] = attn_output + hidden_states
        # 3. Feed-forward
        UpperCAmelCase : Dict = self.norma(lowercase_ )
        if self.use_ada_layer_norm_zero:
            UpperCAmelCase : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
            UpperCAmelCase : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            UpperCAmelCase : str = torch.cat(
                [self.ff(lowercase_ ) for hid_slice in norm_hidden_states.chunk(lowercase_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            UpperCAmelCase : List[Any] = self.ff(lowercase_ )
        if self.use_ada_layer_norm_zero:
            UpperCAmelCase : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
        UpperCAmelCase : Tuple = ff_output + hidden_states
        return hidden_states
class A_ ( nn.Module ):
    """Feed-forward network: activation projection -> dropout -> linear out.

    NOTE(review): all `UpperCAmelCase` assignments discard their value instead of
    setting attributes/locals (e.g. the activation is later appended via an
    unrelated `lowercase_`), and the `__init__` signature repeats the parameter
    name `lowercase_` (SyntaxError) — restore original bindings before use.
    """
    def __init__( self : Any , lowercase_ : int , lowercase_ : Optional[int] = None , lowercase_ : int = 4 , lowercase_ : float = 0.0 , lowercase_ : str = "geglu" , lowercase_ : bool = False , ) -> Optional[Any]:
        super().__init__()
        # Hidden width is `dim * mult`; output defaults to the input width.
        UpperCAmelCase : List[str] = int(dim * mult )
        UpperCAmelCase : List[Any] = dim_out if dim_out is not None else dim
        # Activation selection; no branch matches an unknown `activation_fn`.
        if activation_fn == "gelu":
            UpperCAmelCase : List[str] = GELU(lowercase_ , lowercase_ )
        if activation_fn == "gelu-approximate":
            UpperCAmelCase : Tuple = GELU(lowercase_ , lowercase_ , approximate='tanh' )
        elif activation_fn == "geglu":
            UpperCAmelCase : Any = GEGLU(lowercase_ , lowercase_ )
        elif activation_fn == "geglu-approximate":
            UpperCAmelCase : Any = ApproximateGELU(lowercase_ , lowercase_ )
        UpperCAmelCase : int = nn.ModuleList([] )
        # project in
        self.net.append(lowercase_ )
        # project dropout
        self.net.append(nn.Dropout(lowercase_ ) )
        # project out
        self.net.append(nn.Linear(lowercase_ , lowercase_ ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(lowercase_ ) )
    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : List[Any] ) -> Optional[int]:
        # Apply the sub-modules in sequence; returns the final hidden states.
        for module in self.net:
            UpperCAmelCase : Any = module(lowercase_ )
        return hidden_states
class A_ ( nn.Module ):
    """GELU activation preceded by a linear projection.

    The `gelu` helper upcasts to float32 on "mps" devices before calling
    `F.gelu` (gelu is not implemented for float16 there) and casts back.
    NOTE(review): attribute writes are bound to the throwaway `UpperCAmelCase`
    name throughout these three activation classes — restore before use.
    """
    def __init__( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : str = "none" ) -> List[Any]:
        super().__init__()
        UpperCAmelCase : List[Any] = nn.Linear(lowercase_ , lowercase_ )
        UpperCAmelCase : str = approximate
    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[int] ) -> List[Any]:
        if gate.device.type != "mps":
            return F.gelu(lowercase_ , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : Tuple ) -> Dict:
        # Linear projection followed by the (approximate) GELU.
        UpperCAmelCase : int = self.proj(lowercase_ )
        UpperCAmelCase : Any = self.gelu(lowercase_ )
        return hidden_states
class A_ ( nn.Module ):
    """GEGLU: projects to 2x width, then gates one half with GELU of the other."""
    def __init__( self : List[str] , lowercase_ : int , lowercase_ : int ) -> Any:
        super().__init__()
        UpperCAmelCase : Dict = nn.Linear(lowercase_ , dim_out * 2 )
    def UpperCAmelCase_ ( self : Any , lowercase_ : List[Any] ) -> List[str]:
        if gate.device.type != "mps":
            return F.gelu(lowercase_ )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
    def UpperCAmelCase_ ( self : Dict , lowercase_ : Any ) -> Union[str, Any]:
        # Split the doubled projection into value and gate halves.
        UpperCAmelCase , UpperCAmelCase : List[str] = self.proj(lowercase_ ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(lowercase_ )
class A_ ( nn.Module ):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    def __init__( self : List[Any] , lowercase_ : int , lowercase_ : int ) -> int:
        super().__init__()
        UpperCAmelCase : Tuple = nn.Linear(lowercase_ , lowercase_ )
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : int ) -> List[Any]:
        UpperCAmelCase : Union[str, Any] = self.proj(lowercase_ )
        return x * torch.sigmoid(1.702 * x )
class A_ ( nn.Module ):
    """Adaptive LayerNorm: scale/shift of the norm are predicted from an
    embedding (SiLU + Linear producing 2 * embedding_dim values).

    NOTE(review): attribute writes in these three normalization classes are
    bound to the throwaway `UpperCAmelCase` name, yet methods read
    `self.emb`/`self.silu`/`self.linear`/`self.norm` — restore before use.
    """
    def __init__( self : Any , lowercase_ : Tuple , lowercase_ : List[str] ) -> List[str]:
        super().__init__()
        UpperCAmelCase : int = nn.Embedding(lowercase_ , lowercase_ )
        UpperCAmelCase : List[Any] = nn.SiLU()
        UpperCAmelCase : Dict = nn.Linear(lowercase_ , embedding_dim * 2 )
        UpperCAmelCase : List[str] = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ) -> Tuple:
        # Predict (scale, shift) from the timestep embedding and modulate the norm.
        UpperCAmelCase : List[Any] = self.linear(self.silu(self.emb(lowercase_ ) ) )
        UpperCAmelCase , UpperCAmelCase : List[Any] = torch.chunk(lowercase_ , 2 )
        UpperCAmelCase : Dict = self.norm(lowercase_ ) * (1 + scale) + shift
        return x
class A_ ( nn.Module ):
    """AdaLayerNormZero: predicts 6 modulation chunks (shift/scale/gate for both
    the attention and MLP sub-layers) from a combined timestep+label embedding."""
    def __init__( self : List[Any] , lowercase_ : Any , lowercase_ : Tuple ) -> Optional[int]:
        super().__init__()
        UpperCAmelCase : Any = CombinedTimestepLabelEmbeddings(lowercase_ , lowercase_ )
        UpperCAmelCase : Dict = nn.SiLU()
        UpperCAmelCase : List[Any] = nn.Linear(lowercase_ , 6 * embedding_dim , bias=lowercase_ )
        UpperCAmelCase : Any = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ , eps=1E-6 )
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Dict=None ) -> Dict:
        UpperCAmelCase : Tuple = self.linear(self.silu(self.emb(lowercase_ , lowercase_ , hidden_dtype=lowercase_ ) ) )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = emb.chunk(6 , dim=1 )
        UpperCAmelCase : List[str] = self.norm(lowercase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class A_ ( nn.Module ):
    """Adaptive GroupNorm: per-channel scale/shift predicted from an embedding
    via an optional activation and a Linear producing out_dim * 2 values."""
    def __init__( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : Optional[str] = None , lowercase_ : float = 1E-5 ) -> Optional[Any]:
        super().__init__()
        UpperCAmelCase : str = num_groups
        UpperCAmelCase : Dict = eps
        if act_fn is None:
            UpperCAmelCase : Optional[int] = None
        else:
            UpperCAmelCase : Dict = get_activation(lowercase_ )
        UpperCAmelCase : Any = nn.Linear(lowercase_ , out_dim * 2 )
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Dict ) -> Tuple:
        # Optional activation before the projection; truthiness test skips None.
        if self.act:
            UpperCAmelCase : Tuple = self.act(lowercase_ )
        UpperCAmelCase : Any = self.linear(lowercase_ )
        # Broadcast the embedding over the spatial dims before splitting.
        UpperCAmelCase : Tuple = emb[:, :, None, None]
        UpperCAmelCase , UpperCAmelCase : int = emb.chunk(2 , dim=1 )
        UpperCAmelCase : int = F.group_norm(lowercase_ , self.num_groups , eps=self.eps )
        UpperCAmelCase : int = x * (1 + scale) + shift
        return x
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class A_:
    """A bounded LRU (least-recently-used) cache over hashable keys.

    Keys live in ``dq_store`` (a deque, most-recently-used first) with
    ``key_reference`` as an O(1) membership index.  Referencing a new key
    when the cache is full evicts the least recently used one.

    Fixes applied: the class previously subclassed ``Generic[T]`` although
    ``T`` was never bound at module scope (the TypeVar was assigned to
    ``lowercase__``); two methods shared one name so the first was shadowed;
    attribute writes were discarded into a local; and eviction removed the
    *new* key from the index instead of the popped one.
    """

    def __init__( self , n : int ) -> None:
        """Create a cache holding at most ``n`` keys.

        A falsy ``n`` (0/None) means effectively unbounded; a negative ``n``
        raises ``ValueError``.
        """
        self.dq_store = deque()       # cache store of keys, MRU first
        self.key_reference = set()    # O(1) membership index of cached keys
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            self._MAX_CAPACITY = n

    def refer( self , x ) -> None:
        """Mark ``x`` as most recently used, evicting the LRU entry if full."""
        if x not in self.key_reference:
            if len(self.dq_store ) == self._MAX_CAPACITY:
                # Evict the least-recently-used key and drop it from the index.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            # Already cached: move it to the front.
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k )

    def __repr__( self ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Exercise the cache: capacity 4, with "A" re-referenced so it moves to
    # the front; 5 then evicts the LRU key (2).  Previously this script bound
    # the cache to `lowercase__` but called methods on the unbound names
    # `LRUCache`/`lru_cache`, raising NameError.
    lru_cache = A_(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 1 |
'''simple docstring'''
def _bfs(graph, source, sink, parent):
    """Breadth-first search for an augmenting path in the residual graph.

    Records the discovered path tree in ``parent`` (parent[v] = predecessor
    of v) and returns True when ``sink`` is reachable from ``source`` via
    edges of positive residual capacity.
    """
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def UpperCamelCase(graph, source, sink):
    """Compute the maximum flow from ``source`` to ``sink`` (Ford-Fulkerson
    with BFS path search, i.e. Edmonds-Karp).

    ``graph`` is a capacity matrix and is mutated in place into the residual
    graph.  Previously both functions here shared one obfuscated name (the
    second shadowed the first), the signatures repeated a parameter name
    (SyntaxError), locals were discarded into ``UpperCAmelCase`` while read
    back as ``visited``/``queue``/``path_flow``, and the undefined name
    ``bfs`` was called; all restored here.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while _bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the augmenting path.
        path_flow = float('Inf')
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities (forward minus, backward plus).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
# Capacity matrix for the classic CLRS max-flow example (6 nodes).
# Previously the graph was immediately clobbered by a second assignment to
# `lowercase__`, and the call used the unbound names
# `ford_fulkerson`/`graph`/`source`/`sink`.
lowercase__ = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
if __name__ == "__main__":
    # Source is node 0, sink is node 5; the expected maximum flow is 23.
    print(UpperCamelCase(lowercase__, 0, 5))
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
    """Fast (tokenizers-backed) GPT-NeoX tokenizer wrapper.

    NOTE(review): the `__init__` signature repeats the parameter name
    `lowercase_` (SyntaxError) and locals are bound to `UpperCAmelCase` but
    read as `pre_tok_state`/`pre_tok_class`/`input_ids` — restore the
    original bindings before use.
    """
    UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
    UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
    def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
        super().__init__(
            lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
        # Rebuild the backend pre-tokenizer when `add_prefix_space` disagrees
        # with the serialized state.
        UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
            UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
            UpperCAmelCase : Optional[Any] = add_prefix_space
            UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
        UpperCAmelCase : Any = add_prefix_space
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
        # Save the vocabulary files via the backend model; returns the paths.
        UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
        return tuple(lowercase_ )
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
        # Flatten a Conversation into token ids, appending EOS after each turn
        # and truncating to the last `model_max_length` tokens.
        UpperCAmelCase : List[Any] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
        if len(lowercase_ ) > self.model_max_length:
            UpperCAmelCase : int = input_ids[-self.model_max_length :]
        return input_ids
| 695 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
    """DeBERTa-v2 model configuration (model_type "deberta-v2").

    NOTE(review): configuration values are discarded into the local name
    `UpperCAmelCase` instead of being stored as attributes, and the signature
    repeats `lowercase_` (SyntaxError) — restore bindings before use.
    """
    UpperCAmelCase_ : Any = """deberta-v2"""
    def __init__( self : Any , lowercase_ : List[str]=128_100 , lowercase_ : Optional[Any]=1_536 , lowercase_ : Tuple=24 , lowercase_ : Optional[int]=24 , lowercase_ : List[str]=6_144 , lowercase_ : List[Any]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=0.02 , lowercase_ : List[str]=1E-7 , lowercase_ : List[Any]=False , lowercase_ : List[str]=-1 , lowercase_ : Dict=0 , lowercase_ : List[str]=True , lowercase_ : Optional[int]=None , lowercase_ : List[str]=0 , lowercase_ : Optional[int]="gelu" , **lowercase_ : Optional[Any] , ) -> Dict:
        super().__init__(**lowercase_ )
        UpperCAmelCase : Optional[int] = hidden_size
        UpperCAmelCase : Tuple = num_hidden_layers
        UpperCAmelCase : str = num_attention_heads
        UpperCAmelCase : List[Any] = intermediate_size
        UpperCAmelCase : Dict = hidden_act
        UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
        UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase : List[Any] = max_position_embeddings
        UpperCAmelCase : Tuple = type_vocab_size
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : Optional[int] = relative_attention
        UpperCAmelCase : Union[str, Any] = max_relative_positions
        UpperCAmelCase : Tuple = pad_token_id
        UpperCAmelCase : Optional[int] = position_biased_input
        # Backwards compatibility
        # A string `pos_att_type` like "c2p|p2c" is split into a list.
        if type(lowercase_ ) == str:
            UpperCAmelCase : Optional[Any] = [x.strip() for x in pos_att_type.lower().split('|' )]
        UpperCAmelCase : Union[str, Any] = pos_att_type
        UpperCAmelCase : int = vocab_size
        UpperCAmelCase : List[Any] = layer_norm_eps
        UpperCAmelCase : int = kwargs.get('pooler_hidden_size' , lowercase_ )
        UpperCAmelCase : str = pooler_dropout
        UpperCAmelCase : Any = pooler_hidden_act
class A_ ( _snake_case ):
    """ONNX export configuration for DeBERTa-v2."""
    @property
    def UpperCAmelCase_ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the ONNX inputs; multiple-choice adds a "choice" axis,
        # and token_type_ids are only exported when the model uses them.
        if self.task == "multiple-choice":
            UpperCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            UpperCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        # Minimum ONNX opset version supported by this export.
        return 12
    def UpperCAmelCase_ ( self : str , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , lowercase_ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        # Generate dummy inputs, dropping token_type_ids when the model has none.
        UpperCAmelCase : Any = super().generate_dummy_inputs(preprocessor=lowercase_ , framework=lowercase_ )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
    """Agent tool wrapping openai/whisper-base for speech-to-text.

    Pipeline: encode audio to input features -> generate token ids ->
    batch-decode to a transcript string.
    """
    UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
    UpperCAmelCase_ : Union[str, Any] = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    UpperCAmelCase_ : Dict = """transcriber"""
    UpperCAmelCase_ : int = WhisperProcessor
    UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
    UpperCAmelCase_ : Dict = ["""audio"""]
    UpperCAmelCase_ : Optional[int] = ["""text"""]
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
        # Extract log-mel input features as a PyTorch tensor.
        return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
        # Autoregressively generate transcription token ids.
        return self.model.generate(inputs=lowercase_ )
    def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
        # Decode the first sequence to text.
        # NOTE(review): `skip_special_tokens` is passed the method argument here;
        # presumably it should be the literal True — confirm upstream.
        return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
| 695 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
# Helper: check a loaded Dataset has the expected 4x3 shape, column names,
# and per-feature dtypes.
# NOTE(review): throughout these tests the parameter names are duplicated
# (`UpperCAmelCase_` twice or more is a SyntaxError) and locals are discarded
# into `UpperCAmelCase` while read back as `dataset`, `features`, etc. —
# restore the original bindings before running under pytest.
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
# Test: ParquetDatasetReader honours keep_in_memory (memory must grow only
# when keep_in_memory=True).
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : Any = tmp_path / 'cache'
    UpperCAmelCase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCAmelCase : str = ParquetDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
    _check_parquet_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
# Test: an explicit `features` schema is applied when reading parquet.
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : Tuple = tmp_path / 'cache'
    UpperCAmelCase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    UpperCAmelCase : Dict = features.copy() if features else default_expected_features
    UpperCAmelCase : Dict = (
        Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCAmelCase : Optional[int] = ParquetDatasetReader(UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
    _check_parquet_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
# Test: the `split` argument is propagated to the loaded Dataset.
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : str = tmp_path / 'cache'
    UpperCAmelCase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    UpperCAmelCase : Dict = ParquetDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , split=UpperCAmelCase_ ).read()
    _check_parquet_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
    assert dataset.split == split if split else "train"
# Test: the reader accepts both a single path (str) and a list of paths.
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    if issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
        UpperCAmelCase : List[str] = parquet_path
    elif issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
        UpperCAmelCase : Optional[int] = [parquet_path]
    UpperCAmelCase : List[Any] = tmp_path / 'cache'
    UpperCAmelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    UpperCAmelCase : Dict = ParquetDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
    _check_parquet_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
# Helper: check each requested split of a DatasetDict has the expected 4x3
# shape, column names, and per-feature dtypes.
# NOTE(review): same obfuscation breakage as above (duplicate parameter names,
# locals discarded into `UpperCAmelCase`) — restore bindings before use.
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=("train",) ):
    assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
    for split in splits:
        UpperCAmelCase : Union[str, Any] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
# Test: DatasetDict reading honours keep_in_memory.
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : Tuple = tmp_path / 'cache'
    UpperCAmelCase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCAmelCase : Optional[Any] = ParquetDatasetReader(
            {'train': parquet_path} , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
    _check_parquet_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
# Test: an explicit `features` schema is applied when reading a DatasetDict.
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : List[str] = tmp_path / 'cache'
    UpperCAmelCase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    UpperCAmelCase : int = features.copy() if features else default_expected_features
    UpperCAmelCase : str = (
        Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCAmelCase : Tuple = ParquetDatasetReader({'train': parquet_path} , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
    _check_parquet_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
# Test: split names are preserved when reading one or several splits.
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    if split:
        UpperCAmelCase : Tuple = {split: parquet_path}
    else:
        UpperCAmelCase : Union[str, Any] = 'train'
        UpperCAmelCase : List[Any] = {'train': parquet_path, 'test': parquet_path}
    UpperCAmelCase : int = tmp_path / 'cache'
    UpperCAmelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    UpperCAmelCase : int = ParquetDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
    _check_parquet_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
# Test: ParquetDatasetWriter round-trips a Dataset's Arrow table to disk.
# NOTE(review): same obfuscation breakage as above (duplicate parameter names,
# locals discarded into `UpperCAmelCase`) — restore bindings before use.
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : Dict = ParquetDatasetWriter(UpperCAmelCase_ , tmp_path / 'foo.parquet' )
    assert writer.write() > 0
    UpperCAmelCase : List[Any] = pq.ParquetFile(tmp_path / 'foo.parquet' )
    UpperCAmelCase : Union[str, Any] = pf.read()
    assert dataset.data.table == output_table
# Test: Image features survive a write/read round trip, in both regular and
# streaming mode.
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : str = str(shared_datadir / 'test_image_rgb.jpg' )
    UpperCAmelCase : Any = {'image': [image_path]}
    UpperCAmelCase : Union[str, Any] = Features({'image': Image()} )
    UpperCAmelCase : Optional[Any] = Dataset.from_dict(UpperCAmelCase_ , features=UpperCAmelCase_ )
    UpperCAmelCase : str = ParquetDatasetWriter(UpperCAmelCase_ , tmp_path / 'foo.parquet' )
    assert writer.write() > 0
    UpperCAmelCase : int = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
    assert dataset.features == reloaded_dataset.features
    UpperCAmelCase : Any = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=UpperCAmelCase_ ).read()
    assert dataset.features == reloaded_iterable_dataset.features
# Test: row-group size selection depends on whether the schema contains
# image/audio features.
@pytest.mark.parametrize(
    'feature, expected' , [
        (Features({'foo': Value('int32' )} ), None),
        (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    assert get_writer_batch_size(UpperCAmelCase_ ) == expected
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
    """Whisper model configuration (model_type "whisper").

    Covers encoder/decoder transformer dimensions, generation token ids, and
    SpecAugment fine-tuning parameters.
    NOTE(review): configuration values are discarded into the local name
    `UpperCAmelCase` instead of being stored as attributes, and the signature
    repeats `lowercase_` (SyntaxError) — restore bindings before use.
    """
    UpperCAmelCase_ : Optional[Any] = """whisper"""
    UpperCAmelCase_ : Tuple = ["""past_key_values"""]
    UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
        UpperCAmelCase : Optional[Any] = vocab_size
        UpperCAmelCase : Any = num_mel_bins
        UpperCAmelCase : List[Any] = d_model
        UpperCAmelCase : int = encoder_layers
        UpperCAmelCase : str = encoder_attention_heads
        UpperCAmelCase : Tuple = decoder_layers
        UpperCAmelCase : Any = decoder_attention_heads
        UpperCAmelCase : Tuple = decoder_ffn_dim
        UpperCAmelCase : List[str] = encoder_ffn_dim
        UpperCAmelCase : int = dropout
        UpperCAmelCase : int = attention_dropout
        UpperCAmelCase : List[Any] = activation_dropout
        UpperCAmelCase : Tuple = activation_function
        UpperCAmelCase : Union[str, Any] = init_std
        UpperCAmelCase : Dict = encoder_layerdrop
        UpperCAmelCase : str = decoder_layerdrop
        UpperCAmelCase : Union[str, Any] = use_cache
        UpperCAmelCase : int = encoder_layers
        UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
        UpperCAmelCase : Tuple = max_source_positions
        UpperCAmelCase : List[Any] = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        UpperCAmelCase : Optional[int] = classifier_proj_size
        UpperCAmelCase : List[Any] = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        UpperCAmelCase : Optional[Any] = apply_spec_augment
        UpperCAmelCase : Optional[Any] = mask_time_prob
        UpperCAmelCase : Optional[Any] = mask_time_length
        UpperCAmelCase : str = mask_time_min_masks
        UpperCAmelCase : List[str] = mask_feature_prob
        UpperCAmelCase : Tuple = mask_feature_length
        UpperCAmelCase : Optional[int] = mask_feature_min_masks
        UpperCAmelCase : str = median_filter_width
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
    """ONNX export configuration for the speech seq2seq model above.

    NOTE(review): all three methods below share the name ``UpperCAmelCase_`` —
    in Python the later ``def`` statements shadow the earlier ones, so only the
    last property survives on the class.  Additionally the local bindings were
    renamed to ``UpperCAmelCase`` while the ``return`` statements still use the
    original names (``common_inputs``, ``dummy_inputs``, ``seq_length``, …),
    which are therefore unresolved.  Restore from upstream.
    """

    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Describes the dynamic axes of the ONNX graph inputs.
        UpperCAmelCase : Optional[int] = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            # With past key/values only the batch axis of the decoder ids is dynamic.
            UpperCAmelCase : int = {0: 'batch'}
        else:
            UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            # Inherited helper: appends past_key_values entries to the mapping.
            self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
        return common_inputs

    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
        # Builds dummy audio features via the base OnnxConfig, then dummy
        # decoder inputs via the seq2seq parent, and merges the two.
        UpperCAmelCase : Tuple = OrderedDict()
        UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
        UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
        # Decoder sequence is half the encoder's when caching past states.
        UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
        UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
            preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
        UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs

    @property
    def UpperCAmelCase_ ( self : Dict ) -> float:
        # Numerical tolerance used when validating the exported ONNX model.
        return 1E-3
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCamelCase(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 1_00,
) -> float:
    """Approximate the arc length of ``fnc`` over ``[x_start, x_end]``.

    The curve is approximated by ``steps`` straight-line segments whose
    endpoints lie on the curve; the lengths of the segments are summed.

    Fix(review): the previous version was machine-renamed into invalid code —
    all four parameters shared the name ``UpperCAmelCase_`` (a SyntaxError)
    and the loop referenced unbound names (``xa``, ``fxa``, ``length``).

    :param fnc: function whose graph's length is measured
    :param x_start: left end of the interval
    :param x_end: right end of the interval
    :param steps: number of line segments (more steps -> better accuracy)
    :return: approximate length of the curve
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":

    def f(x: float) -> float:
        """Example integrand whose arc length is printed below."""
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    # Fix(review): the demo previously shadowed the module-level function with
    # an inner ``def UpperCamelCase`` and then called unbound names
    # (``line_length``, ``i``).  Bind the loop counter and call the
    # module-level approximation with increasing step counts.
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {UpperCamelCase(f, -10, 10, i)}")
        i *= 10
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
    # NOTE(review): broken by machine renaming — the three parameters share one
    # name (duplicate argument names are a SyntaxError), and the
    # ``UpperCAmelCase`` bindings below shadow the original names that later
    # lines still reference (``path``, ``save_location``, ``mixed_precision``,
    # ``num_gpus``, ``num_xpus``, ``num_npus``, ``config``).  Intent, per the
    # surviving references: write a minimal Accelerate cluster-config JSON for
    # the local machine (GPU / XPU / NPU / CPU).  Restore from upstream.
    UpperCAmelCase : Any = Path(UpperCAmelCase_ )
    path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
    if path.exists():
        # Never clobber an existing configuration file.
        print(
            F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    UpperCAmelCase : Optional[int] = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    UpperCAmelCase : Dict = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Detect the accelerator type and count; exactly one branch applies.
    if torch.cuda.is_available():
        UpperCAmelCase : Dict = torch.cuda.device_count()
        UpperCAmelCase : List[Any] = num_gpus
        UpperCAmelCase : List[Any] = False
        if num_gpus > 1:
            UpperCAmelCase : Tuple = 'MULTI_GPU'
        else:
            UpperCAmelCase : Optional[Any] = 'NO'
    elif is_xpu_available() and use_xpu:
        UpperCAmelCase : Optional[int] = torch.xpu.device_count()
        UpperCAmelCase : Optional[int] = num_xpus
        UpperCAmelCase : Any = False
        if num_xpus > 1:
            UpperCAmelCase : Tuple = 'MULTI_XPU'
        else:
            UpperCAmelCase : str = 'NO'
    elif is_npu_available():
        UpperCAmelCase : Optional[int] = torch.npu.device_count()
        UpperCAmelCase : str = num_npus
        UpperCAmelCase : int = False
        if num_npus > 1:
            UpperCAmelCase : int = 'MULTI_NPU'
        else:
            UpperCAmelCase : List[str] = 'NO'
    else:
        # CPU-only fallback: single process, no distributed setup.
        UpperCAmelCase : str = 0
        UpperCAmelCase : int = True
        UpperCAmelCase : str = 1
        UpperCAmelCase : str = 'NO'
    UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
    config.to_json_file(UpperCAmelCase_ )
    return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    # NOTE(review): both parameters share one name (SyntaxError).  Upstream this
    # registered the ``default`` subcommand on an argparse subparsers object,
    # with shared parent parsers; the renamer also collapsed the distinct
    # ``parents`` / ``help`` / ``formatter_class`` arguments into one name.
    UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
    parser.add_argument(
        '--config_file' , default=UpperCAmelCase_ , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    # Wire the subcommand to its handler function.
    parser.set_defaults(func=UpperCAmelCase_ )
    return parser
def UpperCamelCase( UpperCAmelCase_ ):
    # NOTE(review): handler for the ``default`` subcommand.  As written,
    # ``write_basic_config``, ``args`` and ``config_file`` are unresolved —
    # the renamer dropped the original bindings (presumably the ``args``
    # parameter and ``config_file = write_basic_config(...)``).
    UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 1 |
"""Project Euler problem 16: sum of the decimal digits of 2**power."""


def UpperCamelCase(power: int = 10_00) -> int:
    """Return the sum of the decimal digits of ``2**power``.

    Fix(review): the previous version referenced unbound names (``power``,
    ``r``, ``n`` were renamed out of existence), raising NameError.

    :param power: the exponent; defaults to 1000 per the Euler problem
    :return: digit sum of ``2**power``
    """
    n = 2**power
    total = 0
    while n:
        # Peel off the last decimal digit and accumulate it.
        total, n = total + n % 10, n // 10
    return total


if __name__ == "__main__":
    # The original called an undefined ``solution``; the module-level function
    # here is named ``UpperCamelCase``, so call it directly.
    print(UpperCamelCase(int(str(input()).strip())))
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    """Deprecated alias kept for backward compatibility.

    Per the warning below, callers should use ``LayoutLMv2ImageProcessor``
    (imported above as ``LayoutLMvaImageProcessor``) instead.
    """

    def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
        # NOTE(review): ``*lowercase_`` and ``**lowercase_`` share a name —
        # duplicate argument names are a SyntaxError.  Also the second argument
        # to ``warnings.warn`` should be a warning *category* (upstream it was
        # presumably ``FutureWarning``), not the args tuple.
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
        super().__init__(*lowercase_ , **lowercase_ )
| 695 | 1 |
'''simple docstring'''
lowercase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
    """A single HANS input example.

    NOTE(review): the five fields all carry the same name ``UpperCAmelCase_``,
    so the later annotations overwrite the earlier ones and the dataclass ends
    up with a single field.  Judging from the construction site below
    (``InputExample(guid=..., text_a=..., text_b=..., label=..., pairID=...)``)
    the fields were originally ``guid``, ``text_a``, ``text_b``, ``label`` and
    ``pairID``.  ``frozen=_snake_case`` also looks mangled (a class object is
    not a boolean flag).
    """

    UpperCAmelCase_ : str
    UpperCAmelCase_ : str
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
    """Tokenized features for one HANS example.

    NOTE(review): duplicate field names collapse to one field (see the sibling
    dataclass above).  The construction site below
    (``InputFeatures(**inputs, label=..., pairID=...)``) and the ``yield`` in
    the TF dataset suggest the fields were ``input_ids``, ``attention_mask``,
    ``token_type_ids``, ``label`` and ``pairID``.
    """

    UpperCAmelCase_ : List[int]
    UpperCAmelCase_ : Optional[List[int]] = None
    UpperCAmelCase_ : Optional[List[int]] = None
    UpperCAmelCase_ : Optional[Union[int, float]] = None
    UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
    """PyTorch ``Dataset`` for HANS, with on-disk feature caching.

    NOTE(review): machine renaming broke this class — local bindings were
    renamed to ``UpperCAmelCase`` so attributes such as ``self.features`` and
    ``self.label_list`` (read by ``__len__`` / ``__getitem__`` /
    ``get_labels`` below) are never assigned, and names like ``processor``,
    ``cached_features_file`` and ``label_list`` are unresolved.  Restore from
    upstream.
    """

    UpperCAmelCase_ : List[InputFeatures]

    def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
        UpperCAmelCase : Dict = hans_processors[task]()
        # Cache file name encodes split, tokenizer class, max length and task.
        UpperCAmelCase : List[Any] = os.path.join(
            lowercase_ , 'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Optional[int] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCAmelCase : int = cached_features_file + '.lock'
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                UpperCAmelCase : Tuple = torch.load(lowercase_ )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                UpperCAmelCase : int = (
                    processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
                )
                logger.info('Training examples: %s' , len(lowercase_ ) )
                UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
                logger.info('Saving features into cached file %s' , lowercase_ )
                torch.save(self.features , lowercase_ )

    def __len__( self : Union[str, Any] ) -> str:
        # Number of featurized examples.
        return len(self.features )

    def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
        return self.features[i]

    def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        # Label names in index order.
        return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
    """TensorFlow dataset wrapper for HANS.

    NOTE(review): broken the same way as the torch variant above — the renamer
    dropped the ``self.features`` / ``self.dataset`` / ``self.label_list``
    assignments that ``get_dataset`` / ``__len__`` / ``__getitem__`` /
    ``get_labels`` read, and duplicate parameter names in ``__init__`` are a
    SyntaxError.  Restore from upstream.
    """

    UpperCAmelCase_ : List[InputFeatures]

    def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
        UpperCAmelCase : int = hans_processors[task]()
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Any = label_list
        UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
        UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )

        def gen():
            # Generator feeding tf.data.Dataset.from_generator below.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
                if ex_index % 10_000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # Output signature: one dict of int32 tensors plus the int32 label.
        UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
            lowercase_ , (
                {
                    'example_id': tf.intaa,
                    'input_ids': tf.intaa,
                    'attention_mask': tf.intaa,
                    'token_type_ids': tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    'example_id': tf.TensorShape([] ),
                    'input_ids': tf.TensorShape([None, None] ),
                    'attention_mask': tf.TensorShape([None, None] ),
                    'token_type_ids': tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        return self.dataset

    def __len__( self : Tuple ) -> Optional[Any]:
        return len(self.features )

    def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
        return self.features[i]

    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        return self.label_list
class A_ ( _snake_case ):
    """Data processor for the HANS heuristic-analysis dataset.

    NOTE(review): the four methods below all share the name ``UpperCAmelCase_``
    — later ``def`` statements shadow earlier ones, so only the last method
    survives on the class.  Upstream these were ``get_train_examples`` /
    ``get_dev_examples`` / ``get_labels`` / ``_create_examples``.
    """

    def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
        # Training split lives in heuristics_train_set.txt under data_dir.
        return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )

    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
        return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )

    def UpperCAmelCase_ ( self : str ) -> Optional[int]:
        # HANS reuses the MNLI label set.
        return ["contradiction", "entailment", "neutral"]

    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
        # NOTE(review): local bindings were renamed away, so ``set_type``,
        # ``examples`` and the guid/text/label names below are unresolved.
        UpperCAmelCase : Union[str, Any] = []
        for i, line in enumerate(lowercase_ ):
            if i == 0:
                # First row is the TSV header.
                continue
            UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
            UpperCAmelCase : Tuple = line[5]
            UpperCAmelCase : Dict = line[6]
            UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
            UpperCAmelCase : Optional[Any] = line[0]
            examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
        return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
    # NOTE(review): all four parameters share one name (SyntaxError); upstream
    # this was ``hans_convert_examples_to_features(examples, label_list,
    # max_length, tokenizer)``.  Local bindings (``label_map``, ``features``,
    # ``tokenizer``, ``examples``) referenced below were also renamed away.
    UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
    UpperCAmelCase : Optional[Any] = []
    for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        # Tokenize the premise/hypothesis pair, padded to max_length.
        UpperCAmelCase : int = tokenizer(
            example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
        # Unknown labels fall back to index 0.
        UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
        UpperCAmelCase : Any = int(example.pairID )
        features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
    # Log the first few converted examples for eyeballing.
    for i, example in enumerate(examples[:5] ):
        logger.info('*** Example ***' )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
# NOTE(review): both dicts are bound to the same name, so the second rebind
# clobbers the first.  Upstream these were presumably the per-task
# num-labels map and the processor registry (``hans_processors``, which the
# dataset classes above reference); ``HansProcessor`` is also unresolved here
# because the processor class above was renamed to ``A_``.
lowercase__ = {
    "hans": 3,
}
lowercase__ = {
    "hans": HansProcessor,
}
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 |
"""Project Euler problem 57: square-root convergents.

Count, among the first ``n`` expansions of the continued fraction for
sqrt(2), the fractions whose numerator has more decimal digits than the
denominator.
"""


def UpperCamelCase(n: int = 10_00) -> int:
    """Return how many of the first ``n`` convergents of sqrt(2) have a
    numerator with more decimal digits than the denominator.

    Fix(review): the previous version referenced unbound names
    (``prev_numerator``, ``prev_denominator``, ``result`` were renamed out of
    existence), raising NameError.

    :param n: number of expansions to examine (Euler problem uses 1000)
    :return: count of expansions satisfying the digit condition
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        # Next convergent: p' = p + 2q, q' = p + q.
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    # The original called an undefined ``solution``; call the function above.
    print(f"{UpperCamelCase() = }")
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
# NOTE(review): three successive rebinds of ``lowercase__`` mean only the last
# (the move deltas) survives; upstream these were presumably the ``Path`` type
# alias, the ``grid`` map and ``delta`` — names the classes below still
# reference (``grid``, ``delta``) but which are unbound here.
lowercase__ = list[tuple[int, int]]  # path: list of (y, x) coordinates
lowercase__ = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
lowercase__ = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class A_ :
    """Search-tree node for greedy best-first search.

    NOTE(review): the constructor's six parameters share the name
    ``lowercase_`` (duplicate argument names are a SyntaxError), and the
    ``UpperCAmelCase`` bindings discard what were presumably ``self.pos_x`` /
    ``self.pos_y`` / ``self.goal_*`` / ``self.g_cost`` / ``self.parent``
    assignments — attributes which ``calculate_heuristic`` and the search
    class below read.  Restore from upstream.
    """

    def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : float , lowercase_ : Node | None , ) -> Dict:
        UpperCAmelCase : List[Any] = pos_x
        UpperCAmelCase : List[str] = pos_y
        UpperCAmelCase : List[str] = (pos_y, pos_x)
        UpperCAmelCase : str = goal_x
        UpperCAmelCase : List[str] = goal_y
        UpperCAmelCase : Union[str, Any] = g_cost
        UpperCAmelCase : Optional[Any] = parent
        UpperCAmelCase : Optional[int] = self.calculate_heuristic()

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> float:
        # Manhattan distance to the goal (the greedy heuristic).
        UpperCAmelCase : List[Any] = abs(self.pos_x - self.goal_x )
        UpperCAmelCase : List[str] = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self : Optional[Any] , lowercase_ : Dict ) -> bool:
        # Ordering used by ``list.sort`` in the search loop below.
        return self.f_cost < other.f_cost
class A_ :
    """Greedy best-first pathfinder over the module-level ``grid``.

    NOTE(review): machine renaming broke this class too — ``self.start`` /
    ``self.target`` / ``self.open_nodes`` / ``self.closed_nodes`` /
    ``self.reached`` are read below but never assigned (their assignments were
    renamed to ``UpperCAmelCase``), and several locals (``current_node``,
    ``successors``, ``better_node``, ``pos_x``/``pos_y``, ``path``) are
    unresolved.  Restore from upstream.
    """

    def __init__( self : Optional[Any] , lowercase_ : tuple[int, int] , lowercase_ : tuple[int, int] ) -> Union[str, Any]:
        UpperCAmelCase : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
        UpperCAmelCase : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowercase_ )
        UpperCAmelCase : str = [self.start]
        UpperCAmelCase : list[Node] = []
        UpperCAmelCase : Dict = False

    def UpperCAmelCase_ ( self : List[str] ) -> Path | None:
        # Pops the node with the smallest heuristic until the goal is reached.
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            UpperCAmelCase : Optional[int] = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                UpperCAmelCase : Optional[Any] = True
                return self.retrace_path(lowercase_ )
            self.closed_nodes.append(lowercase_ )
            UpperCAmelCase : str = self.get_successors(lowercase_ )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(lowercase_ )
                else:
                    # retrieve the best current path
                    UpperCAmelCase : str = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(lowercase_ )
                    else:
                        self.open_nodes.append(lowercase_ )
        if not self.reached:
            # No path found: fall back to just the start position.
            return [self.start.pos]
        return None

    def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Node ) -> list[Node]:
        # Expands the four neighbours of ``parent`` that are in-bounds and free.
        UpperCAmelCase : str = []
        for action in delta:
            UpperCAmelCase : Union[str, Any] = parent.pos_x + action[1]
            UpperCAmelCase : Union[str, Any] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
        return successors

    def UpperCAmelCase_ ( self : str , lowercase_ : Node | None ) -> Path:
        # Walks parent links from ``node`` back to the start, then reverses.
        UpperCAmelCase : Union[str, Any] = node
        UpperCAmelCase : Tuple = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            UpperCAmelCase : List[Any] = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    # NOTE(review): demo driver, also mangled — the four ``lowercase__``
    # rebinds were presumably ``init`` / ``goal`` / ``greedy_bf`` / ``path``,
    # which the calls below still reference, and ``lowercase__ = 2`` inside
    # the loop looks like it was ``grid[pos_x][pos_y] = 2`` (marking the path).
    lowercase__ = (0, 0)
    lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    lowercase__ = GreedyBestFirst(init, goal)
    lowercase__ = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            lowercase__ = 2
        for elem in grid:
            print(elem)
| 695 |
"""Project Euler problem 16: sum of the decimal digits of 2**power."""


def UpperCamelCase(power: int = 10_00) -> int:
    """Return the sum of the decimal digits of ``2**power``.

    Fix(review): the previous version referenced unbound names (``power``,
    ``r``, ``n`` were renamed out of existence), raising NameError.

    :param power: the exponent; defaults to 1000 per the Euler problem
    :return: digit sum of ``2**power``
    """
    n = 2**power
    total = 0
    while n:
        # Peel off the last decimal digit and accumulate it.
        total, n = total + n % 10, n // 10
    return total


if __name__ == "__main__":
    # The original called an undefined ``solution``; the module-level function
    # here is named ``UpperCamelCase``, so call it directly.
    print(UpperCamelCase(int(str(input()).strip())))
| 695 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    """ConvNeXT-style image processor: resize (with crop-percentage logic),
    rescale and normalize pixel inputs.

    NOTE(review): the ``__init__`` bindings were renamed to ``UpperCAmelCase``
    so attributes such as ``self.do_resize`` / ``self.size`` /
    ``self.crop_pct`` — which ``preprocess`` reads — are never assigned, and
    many locals below (``size``, ``shortest_edge``, ``images``, ``do_*``)
    are unresolved.  Restore from upstream.
    """

    UpperCAmelCase_ : Tuple = ["""pixel_values"""]

    def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : str , ) -> None:
        super().__init__(**lowercase_ )
        UpperCAmelCase : Optional[int] = size if size is not None else {'shortest_edge': 384}
        UpperCAmelCase : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        UpperCAmelCase : str = do_resize
        UpperCAmelCase : str = size
        # Default value set here for backwards compatibility where the value in config is None
        UpperCAmelCase : Optional[int] = crop_pct if crop_pct is not None else 224 / 256
        UpperCAmelCase : Union[str, Any] = resample
        UpperCAmelCase : Optional[int] = do_rescale
        UpperCAmelCase : Dict = rescale_factor
        UpperCAmelCase : str = do_normalize
        UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : float , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ) -> np.ndarray:
        # Resize with crop-pct semantics: below 384, resize-then-center-crop;
        # at 384 or larger, warp directly to a square.
        UpperCAmelCase : List[str] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        UpperCAmelCase : Optional[int] = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            UpperCAmelCase : List[Any] = int(shortest_edge / crop_pct )
            UpperCAmelCase : Optional[int] = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_ )
            UpperCAmelCase : List[str] = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_ )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        # Thin wrapper over the functional ``rescale`` transform.
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def UpperCAmelCase_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
        # Thin wrapper over the functional ``normalize`` transform.
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
        # Resolve per-call overrides against the instance defaults.
        UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase : int = crop_pct if crop_pct is not None else self.crop_pct
        UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
        UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std
        UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
        UpperCAmelCase : Optional[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        UpperCAmelCase : str = make_list_of_images(lowercase_ )
        if not valid_images(lowercase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # Validate the resolved option combinations before any work.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        UpperCAmelCase : int = [to_numpy_array(lowercase_ ) for image in images]
        if do_resize:
            UpperCAmelCase : str = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_ ) for image in images]
        if do_rescale:
            UpperCAmelCase : List[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            UpperCAmelCase : str = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        UpperCAmelCase : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Vision-tower configuration for a BLIP-2-style model.

    NOTE(review): machine renaming broke this class — the constructor's
    ``UpperCAmelCase`` bindings discard what were presumably ``self.<attr>``
    assignments, their right-hand names (``hidden_size`` etc.) are not
    parameters here, and the tuple unpack in ``from_pretrained`` below never
    binds ``config_dict``.  Restore from upstream.
    """

    # Presumably the ``model_type`` registry key upstream.
    UpperCAmelCase_ : str = """blip_2_vision_model"""

    def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        super().__init__(**lowercase_ )
        UpperCAmelCase : Optional[int] = hidden_size
        UpperCAmelCase : List[str] = intermediate_size
        UpperCAmelCase : List[Any] = num_hidden_layers
        UpperCAmelCase : Any = num_attention_heads
        UpperCAmelCase : str = patch_size
        UpperCAmelCase : Union[str, Any] = image_size
        UpperCAmelCase : List[Any] = initializer_range
        UpperCAmelCase : str = attention_dropout
        UpperCAmelCase : str = layer_norm_eps
        UpperCAmelCase : List[str] = hidden_act
        UpperCAmelCase : str = qkv_bias

    @classmethod
    def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            UpperCAmelCase : Dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Q-Former configuration for a BLIP-2-style model.

    NOTE(review): broken the same way as the vision config above — the
    constructor's attribute assignments were renamed away and the classmethod's
    tuple unpack never binds ``config_dict``.  Restore from upstream.
    """

    # Presumably the ``model_type`` registry key upstream.
    UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""

    def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
        super().__init__(pad_token_id=lowercase_ , **lowercase_ )
        UpperCAmelCase : int = vocab_size
        UpperCAmelCase : Union[str, Any] = hidden_size
        UpperCAmelCase : List[str] = num_hidden_layers
        UpperCAmelCase : str = num_attention_heads
        UpperCAmelCase : List[Any] = hidden_act
        UpperCAmelCase : Union[str, Any] = intermediate_size
        UpperCAmelCase : Optional[int] = hidden_dropout_prob
        UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase : Tuple = max_position_embeddings
        UpperCAmelCase : Optional[Any] = initializer_range
        UpperCAmelCase : Any = layer_norm_eps
        UpperCAmelCase : Dict = position_embedding_type
        UpperCAmelCase : Any = cross_attention_frequency
        UpperCAmelCase : Any = encoder_hidden_size

    @classmethod
    def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            UpperCAmelCase : Dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
| 695 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ = 16
lowercase__ = 32
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ = 16 , UpperCAmelCase_ = "bert-base-cased" ):
UpperCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Tuple = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase : Optional[Any] = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCAmelCase : Any = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
# Initialize accelerator
UpperCAmelCase : List[str] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase : Any = config['lr']
UpperCAmelCase : Tuple = int(config['num_epochs'] )
UpperCAmelCase : List[Any] = int(config['seed'] )
UpperCAmelCase : Tuple = int(config['batch_size'] )
UpperCAmelCase : str = args.model_name_or_path
set_seed(UpperCAmelCase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
# Instantiate optimizer
UpperCAmelCase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase : List[str] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCAmelCase : str = 1
UpperCAmelCase : List[str] = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , )
else:
UpperCAmelCase : List[Any] = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase : Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase : Optional[int] = 0
# Now we train the model
UpperCAmelCase : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
UpperCAmelCase : str = 0
UpperCAmelCase : int = {}
for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = model(**UpperCAmelCase_ )
UpperCAmelCase : List[Any] = outputs.loss
UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase : str = 0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase : Any = model(**UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase , UpperCAmelCase : Dict = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase_ ) - 1:
UpperCAmelCase : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
UpperCAmelCase : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase_ )
UpperCAmelCase : Any = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase : Optional[int] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase_ , default=3 , help='Number of train epochs.' , )
UpperCAmelCase : str = parser.parse_args()
UpperCAmelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 1 |
'''simple docstring'''
import operator as op
lowercase__ = "scaler.pt"
lowercase__ = "pytorch_model"
lowercase__ = "random_states"
lowercase__ = "optimizer"
lowercase__ = "scheduler"
lowercase__ = "pytorch_model.bin"
lowercase__ = "pytorch_model.bin.index.json"
lowercase__ = "model.safetensors"
lowercase__ = "model.safetensors.index.json"
lowercase__ = "1.10.2"
lowercase__ = "py38"
lowercase__ = "4.17.0"
lowercase__ = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
lowercase__ = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
lowercase__ = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
lowercase__ = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
lowercase__ = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
lowercase__ = "2.0.1"
lowercase__ = ["pdsh", "standard", "openmpi", "mvapich"]
lowercase__ = ["default", "reduce-overhead", "max-autotune"]
lowercase__ = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowercase__ = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
lowercase__ = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
lowercase__ = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : Optional[int] ) -> None:
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 695 |
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baaencode(string.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baadecode(UpperCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if dataset.ndim != value_array.ndim:
UpperCAmelCase : str = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(UpperCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(UpperCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase : List[str] = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(UpperCAmelCase_ )
UpperCAmelCase : str = []
for value in value_array:
UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
UpperCAmelCase : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
if dist > temp_dist:
UpperCAmelCase : List[str] = temp_dist
UpperCAmelCase : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 1 |
'''simple docstring'''
import os
import sys
import unittest
lowercase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase__ = os.path.join(git_repo_path, "src", "transformers")
lowercase__ = "\n{0} = None\n"
lowercase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
lowercase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase : Dict = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(lowercase_ )
UpperCAmelCase : Dict = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(lowercase_ , 'tokenizers' )
UpperCAmelCase : str = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(lowercase_ , 'tensorflow_text' )
UpperCAmelCase : Union[str, Any] = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(lowercase_ , 'sentencepiece_and_tokenizers' )
UpperCAmelCase : List[str] = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(lowercase_ , 'sentencepiece_and_tensorflow_text' )
UpperCAmelCase : List[str] = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(lowercase_ , 'sentencepiece_and_tokenizers_and_vision' )
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Any = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , lowercase_ )
self.assertIn('tensorflow_text' , lowercase_ )
self.assertIn('sentencepiece_and_tokenizers' , lowercase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : int = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(lowercase_ , '\nCONSTANT = None\n' )
UpperCAmelCase : Union[str, Any] = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
lowercase_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
UpperCAmelCase : Union[str, Any] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
UpperCAmelCase : List[Any] = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase : Optional[int] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
UpperCAmelCase : int = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , lowercase_ )
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        """trust_remote_code gating for a Hub-hosted dynamic feature extractor.

        NOTE(review): mangled — `lowercase_` (used both as the expected exception and
        as the True/False flag values) and `feature_extractor` are undefined as written.
        """
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowercase_ )
            UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
            self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        """Register a custom config/feature-extractor pair with the auto classes, then
        round-trip it through save_pretrained/from_pretrained; always deregister in
        `finally` so other tests are unaffected.

        NOTE(review): mangled — the classes being registered and the dir/type
        arguments (`lowercase_`) are undefined as written.
        """
        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase_ ):
                AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowercase_ )
                UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
                self.assertIsInstance(lowercase_ , lowercase_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def UpperCAmelCase_ ( self : int ) -> Tuple:
        """Local vs. remote precedence when both a locally-registered class and Hub
        remote code exist for the same model: local by default and when remote code is
        disabled; Hub code only when trust_remote_code is enabled.

        NOTE(review): mangled — `lowercase_` flag/class arguments and
        `feature_extractor` bindings are undefined as written.
        """
        class A_ ( _snake_case ):
            """Locally-registered stand-in; instances are tagged via the class attribute below."""
            UpperCAmelCase_ : Union[str, Any] = True
        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # If remote code is not set, the default is to use local
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class A_ ( _snake_case ):
    """Configuration for a Deformable DETR model (`model_type = "deformable_detr"`).

    NOTE(review): this file was machine-obfuscated. The `__init__` signature repeats
    the parameter name `lowercase_` dozens of times, which is a SyntaxError in Python;
    the body references the original parameter names (backbone_config,
    use_timm_backbone, d_model, ...) which are therefore unbound. Attribute
    assignments were rewritten to the local name `UpperCAmelCase`, so the intended
    `self.<attr>` targets are lost.
    """

    # NOTE(review): both class attributes share one mangled name, so the dict
    # (originally presumably an attribute map) overwrites the model-type string.
    UpperCAmelCase_ : Optional[int] = """deformable_detr"""
    UpperCAmelCase_ : str = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self : Union[str, Any] , lowercase_ : List[Any]=True , lowercase_ : str=None , lowercase_ : int=3 , lowercase_ : List[Any]=300 , lowercase_ : List[str]=1_024 , lowercase_ : Tuple=6 , lowercase_ : Union[str, Any]=1_024 , lowercase_ : Optional[Any]=8 , lowercase_ : List[Any]=6 , lowercase_ : Any=1_024 , lowercase_ : List[Any]=8 , lowercase_ : Tuple=0.0 , lowercase_ : int=True , lowercase_ : int="relu" , lowercase_ : Union[str, Any]=256 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : Optional[Any]=1.0 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=False , lowercase_ : Any="sine" , lowercase_ : Dict="resnet50" , lowercase_ : List[str]=True , lowercase_ : Dict=False , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=300 , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[Any]=1 , lowercase_ : Dict=1 , lowercase_ : Dict=5 , lowercase_ : str=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[str]=0.25 , lowercase_ : Union[str, Any]=False , **lowercase_ : List[str] , ) -> List[str]:
        # The two backbone specification mechanisms are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                UpperCAmelCase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(lowercase_ , lowercase_ ):
                # presumably: a dict backbone_config is materialized via its model_type
                UpperCAmelCase : Optional[int] = backbone_config.get('model_type' )
                UpperCAmelCase : Any = CONFIG_MAPPING[backbone_model_type]
                UpperCAmelCase : List[str] = config_class.from_dict(lowercase_ )
        UpperCAmelCase : Any = use_timm_backbone
        UpperCAmelCase : int = backbone_config
        UpperCAmelCase : Any = num_channels
        UpperCAmelCase : Optional[int] = num_queries
        UpperCAmelCase : str = max_position_embeddings
        UpperCAmelCase : Optional[Any] = d_model
        UpperCAmelCase : Any = encoder_ffn_dim
        UpperCAmelCase : int = encoder_layers
        UpperCAmelCase : Any = encoder_attention_heads
        UpperCAmelCase : Optional[int] = decoder_ffn_dim
        UpperCAmelCase : Union[str, Any] = decoder_layers
        UpperCAmelCase : List[Any] = decoder_attention_heads
        UpperCAmelCase : Any = dropout
        UpperCAmelCase : Dict = attention_dropout
        UpperCAmelCase : List[Any] = activation_dropout
        UpperCAmelCase : List[Any] = activation_function
        UpperCAmelCase : List[Any] = init_std
        UpperCAmelCase : Union[str, Any] = init_xavier_std
        UpperCAmelCase : Optional[Any] = encoder_layerdrop
        UpperCAmelCase : Tuple = auxiliary_loss
        UpperCAmelCase : Tuple = position_embedding_type
        UpperCAmelCase : Tuple = backbone
        UpperCAmelCase : Optional[int] = use_pretrained_backbone
        UpperCAmelCase : Tuple = dilation
        # deformable attributes
        UpperCAmelCase : Union[str, Any] = num_feature_levels
        UpperCAmelCase : Optional[int] = encoder_n_points
        UpperCAmelCase : str = decoder_n_points
        UpperCAmelCase : str = two_stage
        UpperCAmelCase : int = two_stage_num_proposals
        UpperCAmelCase : Tuple = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        UpperCAmelCase : List[str] = class_cost
        UpperCAmelCase : List[str] = bbox_cost
        UpperCAmelCase : str = giou_cost
        # Loss coefficients
        UpperCAmelCase : List[str] = mask_loss_coefficient
        UpperCAmelCase : Union[str, Any] = dice_loss_coefficient
        UpperCAmelCase : List[str] = bbox_loss_coefficient
        UpperCAmelCase : List[str] = giou_loss_coefficient
        UpperCAmelCase : Union[str, Any] = eos_coefficient
        UpperCAmelCase : Optional[int] = focal_alpha
        UpperCAmelCase : Union[str, Any] = disable_custom_kernels
        super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )

    @property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
        # convenience alias for the number of encoder attention heads
        return self.encoder_attention_heads

    @property
    def UpperCAmelCase_ ( self : int ) -> int:
        # convenience alias for the model hidden size
        return self.d_model

    def UpperCAmelCase_ ( self : int ) -> List[Any]:
        """Serialize the config to a plain dict (backbone_config nested as a dict).

        NOTE(review): mangled — `output` is never bound, so the intended
        `output['backbone_config'] / output['model_type']` targets are lost.
        """
        UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            UpperCAmelCase : Optional[Any] = self.backbone_config.to_dict()
        UpperCAmelCase : List[Any] = self.__class__.model_type
        return output
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ ):
    """Download the video behind an Instagram/IGTV URL and return its raw bytes.

    Fixes NameErrors introduced by obfuscation: `base_url` and the resolved video
    URL were referenced but never bound (assignments had been rewritten).

    :param UpperCAmelCase_: the Instagram/IGTV page URL as a string.
    :return: the video content as ``bytes``.
    """
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    # The downloadgram API returns a JSON list whose first entry carries the
    # direct source URL of the video.
    video_url = requests.get(base_url + UpperCAmelCase_ ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    # CLI entry point: prompt for a URL and save the video bytes to a timestamped .mp4.
    # NOTE(review): mangled — both inputs are bound to `lowercase__`, so the later
    # references to `file_name`, `url` and `download_video` are undefined as written.
    lowercase__ = input("Enter Video/IGTV url: ").strip()
    lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 1 |
'''simple docstring'''
import math
import unittest
def UpperCamelCase( UpperCAmelCase_ ):
    """Return True iff `UpperCAmelCase_` is a prime number.

    Fixes obfuscation damage: the body referenced an unbound `number` and the
    `isinstance` check compared the argument against itself instead of `int`.

    :param UpperCAmelCase_: a non-negative integer (enforced by ``assert``).
    :raises AssertionError: if the input is not an int or is negative.
    """
    number = UpperCAmelCase_
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class A_ ( unittest.TestCase ):
    """Tests for the primality checker `UpperCamelCase` defined above.

    Fixes NameErrors from obfuscation: the methods called the undefined name
    `is_prime` (the checker in this file is named `UpperCamelCase`) and passed the
    undefined `lowercase_` to assertRaises — the checker guards its input with
    `assert`, so the raised type is AssertionError.

    NOTE(review): both test methods still share the mangled name `UpperCAmelCase_`,
    so the second definition shadows the first in the class namespace.
    """

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
        # A sample of known primes must all be accepted.
        self.assertTrue(UpperCamelCase(2 ) )
        self.assertTrue(UpperCamelCase(3 ) )
        self.assertTrue(UpperCamelCase(5 ) )
        self.assertTrue(UpperCamelCase(7 ) )
        self.assertTrue(UpperCamelCase(11 ) )
        self.assertTrue(UpperCamelCase(13 ) )
        self.assertTrue(UpperCamelCase(17 ) )
        self.assertTrue(UpperCamelCase(19 ) )
        self.assertTrue(UpperCamelCase(23 ) )
        self.assertTrue(UpperCamelCase(29 ) )

    def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
        # Negative input violates the `assert` precondition inside the checker.
        with self.assertRaises(AssertionError ):
            UpperCamelCase(-19 )
        self.assertFalse(
            UpperCamelCase(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
        self.assertFalse(
            UpperCamelCase(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
        self.assertFalse(UpperCamelCase(2 * 2 ) )
        self.assertFalse(UpperCamelCase(2 * 3 ) )
        self.assertFalse(UpperCamelCase(3 * 3 ) )
        self.assertFalse(UpperCamelCase(3 * 5 ) )
        self.assertFalse(UpperCamelCase(3 * 5 * 7 ) )
if __name__ == "__main__":
    # Run the unittest suite defined above when executed as a script.
    unittest.main()
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
    """Project Euler 94: sum of the perimeters (<= `UpperCAmelCase_`) of all
    almost-equilateral Heronian triangles, generated by the known recurrence.

    Fixes obfuscation damage: every loop variable (`perimeter`, `prev_value`,
    `value`, `perimeters_sum`, `i`) and the bound `max_perimeter` were referenced
    but never defined because the assignments had been rewritten.

    :param UpperCAmelCase_: maximum perimeter to include (default 10**9).
    :return: the sum of the qualifying perimeters.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= UpperCAmelCase_:
        perimeters_sum += perimeter
        # Advance the linear recurrence producing the triangle side parameter.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (n, n, n+1) and (n, n, n-1) triangle families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # Print the Project Euler answer for the default limit of 10**9.
    print(f'''{solution() = }''')
| 695 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def UpperCamelCase( UpperCAmelCase_ ):
    """Compute pi to `UpperCAmelCase_` significant digits with the Chudnovsky series.

    Fixes obfuscation damage: the body referenced the unbound name `precision`,
    the Decimal context precision was never set, the partial sum was seeded from
    the precision instead of the linear term, and the multinomial term used
    `factorial(precision)` where the Chudnovsky formula requires `factorial(k)`.

    :param UpperCAmelCase_: number of significant digits (a natural number).
    :raises TypeError: if the input is not an int.
    :raises ValueError: if the input is < 1.
    :return: pi as a string, with the (possibly mis-rounded) last digit dropped.
    """
    if not isinstance(UpperCAmelCase_ , int ):
        raise TypeError('Undefined for non-integers' )
    elif UpperCAmelCase_ < 1:
        raise ValueError('Undefined for non-natural numbers' )
    # All Decimal arithmetic below happens at the requested precision.
    getcontext().prec = UpperCAmelCase_
    num_iterations = ceil(UpperCAmelCase_ / 14 )
    constant_term = 42_68_80 * Decimal(1_00_05 ).sqrt()
    exponential_term = 1
    linear_term = 13_59_14_09
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_45_14_01_34
        exponential_term *= -26_25_37_41_26_40_76_80_00
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    # Demo: print the first 50 significant digits of pi.
    lowercase__ = 50
    print(f'''The first {n} digits of pi is: {pi(n)}''')
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast pipeline test for an unconditional latent-diffusion (LDM) pipeline built
    from tiny dummy components.

    NOTE(review): machine-mangled — the property bodies bind to `UpperCAmelCase` but
    return the undefined names `model`/`CLIPTextModel(lowercase_)`, all three
    properties share one name, and the main test references unbound locals
    (`ldm`, `image`, `image_from_tuple`, ...).
    """

    @property
    def UpperCAmelCase_ ( self : Any ) -> List[Any]:
        # Tiny UNet used as the denoiser.
        torch.manual_seed(0 )
        UpperCAmelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        # Tiny VQ autoencoder for latent encoding/decoding.
        torch.manual_seed(0 )
        UpperCAmelCase : str = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Tiny CLIP text encoder config.
        torch.manual_seed(0 )
        UpperCAmelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(lowercase_ )

    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        """Two 2-step generations (dict and tuple return) must agree with a known slice."""
        UpperCAmelCase : Any = self.dummy_uncond_unet
        UpperCAmelCase : Tuple = DDIMScheduler()
        UpperCAmelCase : Optional[Any] = self.dummy_vq_model
        UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
        ldm.to(lowercase_ )
        ldm.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : str = torch.manual_seed(0 )
        UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
        UpperCAmelCase : int = torch.manual_seed(0 )
        UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # mps needs a looser tolerance than cuda/cpu
        UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: the pretrained CompVis/ldm-celebahq-256 pipeline must
    reproduce a known output slice.

    NOTE(review): machine-mangled — `ldm`, `image`, `image_slice`,
    `expected_slice` and `tolerance` are referenced but never bound as written.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> Any:
        UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(lowercase_ )
        ldm.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Tuple = torch.manual_seed(0 )
        UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
        UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        # mps needs a looser tolerance than cuda/cpu
        UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( mixed_precision="no" , save_location = default_json_config_file , use_xpu = False ):
    """Write a minimal default Accelerate cluster config file.

    Fixes obfuscation damage: the original signature repeated one parameter name
    three times (a SyntaxError), and the body referenced unbound names (`path`,
    `mixed_precision`, `config`, `num_gpus`, ...). Parameter names are restored to
    the ones the body actually uses.

    :param mixed_precision: one of "no", "fp16", "bf16", "fp8".
    :param save_location: destination path for the JSON config file.
    :param use_xpu: whether to prefer XPU devices when available.
    :return: the path the config was written to, or False if a config already exists.
    """
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    # Never clobber an existing configuration.
    if path.exists():
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Pick the accelerator: CUDA first, then XPU (opt-in), then NPU, else CPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        # CPU-only fallback: single process, no distributed setup.
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Register the `accelerate config default` subcommand on a subparser group.

    NOTE(review): machine-mangled — the signature repeats one parameter name (a
    SyntaxError), `parser` is referenced but never bound, and the original
    `parents=`, `help=`, `formatter_class=`, `default=`, `type=` and `func=`
    arguments were all collapsed to the undefined placeholder `UpperCAmelCase_`.
    """
    UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
    parser.add_argument(
        '--config_file' , default=UpperCAmelCase_ , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=UpperCAmelCase_ )
    return parser
def UpperCamelCase( UpperCAmelCase_ ):
    """Handler for the `default` subcommand: write the config and report its path.

    NOTE(review): machine-mangled — `args`, `write_basic_config` and `config_file`
    are undefined as written (the config-writing function in this file is itself
    named `UpperCamelCase` and is shadowed by later definitions).
    """
    UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""" )
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast pipeline test for KarrasVePipeline using a tiny dummy UNet.

    NOTE(review): machine-mangled — the property binds `UpperCAmelCase` but returns
    the undefined `model`, and the test method references unbound locals
    (`pipe`, `image`, `image_from_tuple`, `expected_slice`).
    """

    @property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Tiny UNet used as the denoiser.
        torch.manual_seed(0 )
        UpperCAmelCase : Any = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        """Two 2-step generations (dict vs tuple return) must match a known slice."""
        UpperCAmelCase : Dict = self.dummy_uncond_unet
        UpperCAmelCase : Dict = KarrasVeScheduler()
        UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: KarrasVePipeline with the pretrained
    google/ncsnpp-celebahq-256 UNet must reproduce a known output slice.

    NOTE(review): machine-mangled — `pipe`, `image`, `image_slice` and
    `expected_slice` are referenced but never bound as written.
    """

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
        UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
        UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
        UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def UpperCamelCase( UpperCAmelCase_ ):
    """Build a SwinConfig for a 192px SimMIM checkpoint from the model name.

    Fixes obfuscation damage: the body referenced the unbound names `model_name`,
    `window_size`, `embed_dim`, `depths` and `num_heads` (assignments had been
    rewritten to a throwaway local).

    :param UpperCAmelCase_: model name containing either "base" or "large".
    :raises ValueError: for any other variant.
    :return: a populated ``SwinConfig``.
    """
    config = SwinConfig(image_size=1_92 )
    if "base" in UpperCAmelCase_:
        window_size = 6
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in UpperCAmelCase_:
        window_size = 12
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def UpperCamelCase( UpperCAmelCase_ ):
    """Map one original SimMIM checkpoint key onto the HF Swin naming scheme.

    Fixes obfuscation damage: the body referenced the unbound name `name`
    (the parameter had been renamed and all rebindings rewritten).

    :param UpperCAmelCase_: original state-dict key.
    :return: the renamed key; decoder keys pass through unchanged, everything
        else is prefixed with ``swin.``.
    """
    name = UpperCAmelCase_
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
    # "attn.proj" must be handled before the generic "attn" rewrite below.
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        # Decoder weights keep their original names.
        pass
    else:
        name = 'swin.' + name
    return name
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Split fused qkv checkpoint weights into separate query/key/value entries.

    NOTE(review): machine-mangled beyond local repair — the signature repeats one
    parameter name (a SyntaxError), `orig_state_dict`/`model`/`key`/`val`/`dim`
    are referenced but never bound, and the original dict-key assignment targets
    (presumably f-string state-dict keys for query/key/value, and a renamed key in
    the final branch) were replaced by throwaway locals, so the rewritten state
    dict is never actually populated.
    """
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase : List[Any] = orig_state_dict.pop(UpperCAmelCase_ )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            UpperCAmelCase : Optional[int] = key.split('.' )
            UpperCAmelCase : List[Any] = int(key_split[2] )
            UpperCAmelCase : Any = int(key_split[4] )
            # per-block head size determines the q/k/v split boundaries
            UpperCAmelCase : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                UpperCAmelCase : Any = val[:dim, :]
                UpperCAmelCase : Optional[Any] = val[
                    dim : dim * 2, :
                ]
                UpperCAmelCase : Optional[int] = val[-dim:, :]
            else:
                UpperCAmelCase : List[Any] = val[
                    :dim
                ]
                UpperCAmelCase : Optional[Any] = val[
                    dim : dim * 2
                ]
                UpperCAmelCase : List[Any] = val[
                    -dim:
                ]
        else:
            UpperCAmelCase : Optional[int] = val
    return orig_state_dict
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    """End-to-end conversion: load a SimMIM checkpoint, convert it to a HF
    SwinForMaskedImageModeling model, sanity-check on a COCO image, then optionally
    save and push to the Hub.

    NOTE(review): machine-mangled — the signature repeats one parameter name (a
    SyntaxError), and `get_swin_config`/`convert_state_dict` plus all locals
    (`state_dict`, `config`, `model`, `image_processor`, `inputs`, `outputs`,
    `model_name`, `pytorch_dump_folder_path`, `push_to_hub`) are undefined as
    written; the sibling helpers in this file all share the name `UpperCamelCase`.
    """
    UpperCAmelCase : List[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )['model']
    UpperCAmelCase : List[str] = get_swin_config(UpperCAmelCase_ )
    UpperCAmelCase : Optional[int] = SwinForMaskedImageModeling(UpperCAmelCase_ )
    model.eval()
    UpperCAmelCase : List[str] = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ )
    model.load_state_dict(UpperCAmelCase_ )
    UpperCAmelCase : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    UpperCAmelCase : Tuple = ViTImageProcessor(size={'height': 1_92, 'width': 1_92} )
    UpperCAmelCase : Dict = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
    UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt' )
    with torch.no_grad():
        UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase_ ).logits
    print(outputs.keys() )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(UpperCAmelCase_ )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(UpperCAmelCase_ )
    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(F"""microsoft/{model_name}""" )
        image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
    # CLI wiring for the conversion script.
    # NOTE(review): `convert_swin_checkpoint` is undefined as written — the
    # conversion entry point above was renamed to `UpperCamelCase` by the mangler.
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    lowercase__ = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration for an Autoformer time-series model (`model_type = "autoformer"`).

    NOTE(review): machine-obfuscated. The `__init__` signature repeats the
    parameter name `lowercase_` dozens of times — a SyntaxError — while the body
    references the original names (prediction_length, context_length, ...), which
    are therefore unbound. Attribute assignments were rewritten to a throwaway
    local, losing the `self.<attr>` targets.
    """

    # NOTE(review): both class attributes share one mangled name; the second
    # (originally presumably an attribute map) overwrites the model-type string.
    UpperCAmelCase_ : Tuple = """autoformer"""
    UpperCAmelCase_ : Optional[int] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
        # time series specific configuration
        UpperCAmelCase : int = prediction_length
        UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
        UpperCAmelCase : List[Any] = distribution_output
        UpperCAmelCase : Tuple = loss
        UpperCAmelCase : Dict = input_size
        UpperCAmelCase : Dict = num_time_features
        UpperCAmelCase : Tuple = lags_sequence
        UpperCAmelCase : str = scaling
        UpperCAmelCase : Optional[int] = num_dynamic_real_features
        UpperCAmelCase : List[str] = num_static_real_features
        UpperCAmelCase : Optional[int] = num_static_categorical_features
        # cardinality / embedding dims must line up with the categorical feature count
        if cardinality is not None and num_static_categorical_features > 0:
            if len(lowercase_ ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            UpperCAmelCase : int = cardinality
        else:
            UpperCAmelCase : Union[str, Any] = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(lowercase_ ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            UpperCAmelCase : Any = embedding_dimension
        else:
            UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        UpperCAmelCase : Dict = num_parallel_samples
        # Transformer architecture configuration
        UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
        UpperCAmelCase : List[Any] = d_model
        UpperCAmelCase : Dict = encoder_attention_heads
        UpperCAmelCase : Tuple = decoder_attention_heads
        UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
        UpperCAmelCase : str = decoder_ffn_dim
        UpperCAmelCase : str = encoder_layers
        UpperCAmelCase : Optional[Any] = decoder_layers
        UpperCAmelCase : int = dropout
        UpperCAmelCase : Any = attention_dropout
        UpperCAmelCase : Tuple = activation_dropout
        UpperCAmelCase : str = encoder_layerdrop
        UpperCAmelCase : Union[str, Any] = decoder_layerdrop
        UpperCAmelCase : Tuple = activation_function
        UpperCAmelCase : Dict = init_std
        UpperCAmelCase : Union[str, Any] = use_cache
        # Autoformer
        UpperCAmelCase : Any = label_length
        UpperCAmelCase : List[Any] = moving_average
        UpperCAmelCase : Optional[Any] = autocorrelation_factor
        super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )

    @property
    def UpperCAmelCase_ ( self : List[str] ) -> int:
        # Total width of the per-timestep feature vector fed to the model.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 695 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_snake_case )
class A_ ( _snake_case ):
    """Image-classification task template mapping dataset columns onto the
    standard ("image", "labels") schema.

    NOTE(review): machine-mangled — all five fields share one name (later ones
    overwrite earlier ones), `frozen=` was rewritten to a class reference, and
    the align method references the unbound name `features` and loses its
    assignment targets (`task_template`, `label_schema`).
    """

    UpperCAmelCase_ : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    UpperCAmelCase_ : ClassVar[Features] = Features({"""image""": Image()} )
    UpperCAmelCase_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
    UpperCAmelCase_ : str = "image"
    UpperCAmelCase_ : str = "labels"

    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
        """Return a copy of this template whose label schema uses the dataset's
        ClassLabel feature; raises ValueError when the column is missing or of
        the wrong type."""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , lowercase_ ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        UpperCAmelCase : int = copy.deepcopy(self )
        UpperCAmelCase : str = self.label_schema.copy()
        UpperCAmelCase : Optional[int] = features[self.label_column]
        UpperCAmelCase : int = label_schema
        return task_template

    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict[str, str]:
        # dataset-column -> template-column mapping
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 695 |
'''simple docstring'''
def UpperCamelCase( sentence , ngram_size ):
    """Return all contiguous n-grams of ``sentence`` with length ``ngram_size``.

    BUGFIX: the two parameters were both named `UpperCAmelCase_` (a SyntaxError) and
    the body referenced the undefined names `sentence`/`ngram_size`; the parameters
    now carry those names.

    >>> UpperCamelCase("abcde", 2)
    ['ab', 'bc', 'cd', 'de']
    """
    # One n-gram starts at every index that leaves room for `ngram_size` characters.
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 695 | 1 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCamelCase( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ):
    """Load a saved state dict, cast every tensor to fp16 and write it back to disk.

    Args:
        src_path: path of a ``pytorch_model.bin``-style saved state dict.
        map_location: device string forwarded to ``torch.load``.
        save_path: destination path; defaults to overwriting ``src_path``.

    BUGFIX: the loaded dict and the halved tensors were assigned to throwaway
    locals, so nothing was ever converted; the parameters also shared one name.
    """
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        # Store the halved tensor back under its key.
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
    # BUGFIX: the converter function in this module is bound to `UpperCamelCase`,
    # not `convert` — expose it as the CLI entry point.
    fire.Fire(UpperCamelCase)
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order low-pass biquad filter (RBJ Audio-EQ-Cookbook coefficients).

    BUGFIX: every coefficient assignment had lost its target name, so the final
    `set_coefficients` call referenced undefined names.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    # Numerator (b*) and denominator (a*) coefficients; b2 == b0 for a low-pass.
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def UpperCamelCase( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order high-pass biquad filter (RBJ Audio-EQ-Cookbook coefficients).

    BUGFIX: restored the coefficient names lost during obfuscation.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    # b2 == b0 for a high-pass.
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def UpperCamelCase( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order band-pass biquad filter (RBJ Audio-EQ-Cookbook coefficients).

    BUGFIX: restored the coefficient names lost during obfuscation.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def UpperCamelCase( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order all-pass biquad filter (RBJ Audio-EQ-Cookbook coefficients).

    BUGFIX: restored the coefficient names lost during obfuscation. For an
    all-pass, the denominator is the reversed numerator.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def UpperCamelCase( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a peaking-EQ biquad filter with ``gain_db`` boost/cut (RBJ cookbook).

    BUGFIX: restored the coefficient names lost during obfuscation.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def UpperCamelCase( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a low-shelf biquad filter with ``gain_db`` boost/cut (RBJ cookbook).

    BUGFIX: restored the intermediate/coefficient names lost during obfuscation.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Shared sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def UpperCamelCase( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a high-shelf biquad filter with ``gain_db`` boost/cut (RBJ cookbook).

    BUGFIX: restored the intermediate/coefficient names lost during obfuscation.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Shared sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
| 695 | 1 |
'''simple docstring'''
lowercase__ = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
# BUGFIX: the converter below referenced `ENERGY_CONVERSION`, which did not exist;
# bind the joules-per-unit table to that name (keeping `lowercase__` as well).
ENERGY_CONVERSION = lowercase__


def UpperCamelCase( from_type: str , to_type: str , value: float ) -> float:
    """Convert ``value`` between energy units via joules.

    Raises:
        ValueError: if either unit name is not in ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
# BUGFIX: the class body uses `Generic[T]` / `deque[T]`, so the TypeVar must be
# reachable under the name `T` as well.
T = lowercase__


class A_ ( Generic[T] ):
    '''Least-recently-used cache backed by a deque; the most recent key sits at the left end.'''

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self : List[Any] , lowercase_ : int ) -> None:
        """Create a cache holding at most ``lowercase_`` keys (0 means unbounded).

        BUGFIX: the stores were assigned to throwaway locals instead of ``self``,
        and the capacity was never written back to the class.
        """
        self.dq_store = deque()
        self.key_reference = set()
        if not lowercase_:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif lowercase_ < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = lowercase_

    def refer( self : List[str] , lowercase_ : T ) -> None:
        """Touch key ``lowercase_``: make it most-recent, evicting the LRU key when full.

        BUGFIX: the body referenced an undefined ``x`` and removed the touched key
        from ``key_reference`` instead of the evicted one.
        """
        if lowercase_ not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            # Already cached: move it to the most-recent position.
            self.dq_store.remove(lowercase_ )
        self.dq_store.appendleft(lowercase_ )
        self.key_reference.add(lowercase_ )

    def display( self : Dict ) -> None:
        """Print cached keys from most- to least-recently used."""
        for k in self.dq_store:
            print(k )

    # Backward-compatible alias: both methods were named `UpperCAmelCase_` in the
    # obfuscated original, where the last binding (display) won.
    UpperCAmelCase_ = display

    def __repr__( self : Union[str, Any] ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""


# Named alias used inside the methods above and by the `__main__` demo below.
LRUCache = A_
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: exercise the cache, print its MRU->LRU contents, and check the final order.
    # NOTE(review): as written the class above is named `A_` with obfuscated method
    # names, so `LRUCache`, `.refer` and `.display` below are unresolved — confirm
    # the intended public names before running this module directly.
    lowercase__ = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
    '''Tokenization tests for the LXMERT slow and fast tokenizers.'''

    # BUGFIX: the four class attributes and four methods all shared the name
    # `UpperCAmelCase_`, so the mixin could not find them and the test methods
    # shadowed each other; canonical names restored.
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp( self : Tuple ) -> List[str]:
        """Write a tiny WordPiece vocabulary into the temp dir used by the tests."""
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        # BUGFIX: the path must be stored on the instance — tests read self.vocab_file.
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_input_output_texts( self : Any , tokenizer: List[str] ) -> Tuple:
        """Return a (raw, expected) text pair for the common round-trip checks."""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer( self : Dict ) -> Union[str, Any]:
        """The slow tokenizer produces the expected WordPiece tokens and ids."""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_rust_and_python_full_tokenizers( self : Tuple ) -> Union[str, Any]:
        """Slow and fast tokenizers must agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Compare encodings without special tokens...
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # ...and with them.
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
# BUGFIX: the class below references these constants by name; the trailing
# `lowercase__` rebindings mirror the original assignment order.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = VOCAB_FILES_NAMES
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES


class A_ ( _snake_case ):
    '''Fast GPT-NeoX tokenizer backed by the `tokenizers` library.'''

    # BUGFIX: restored the attribute names the PreTrainedTokenizerFast machinery reads.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__( self : List[str] , vocab_file: Any = None , merges_file: Dict = None , tokenizer_file: List[str] = None , unk_token: List[Any] = "<|endoftext|>" , bos_token: List[str] = "<|endoftext|>" , eos_token: Any = "<|endoftext|>" , add_prefix_space: List[str] = False , **kwargs: Union[str, Any] , ) -> None:
        """Build the tokenizer; re-creates the pre-tokenizer if its
        ``add_prefix_space`` setting disagrees with the requested one.

        BUGFIX: all parameters shared the name `lowercase_` (a SyntaxError).
        """
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def save_vocabulary( self : Tuple , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Save the tokenizer model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self : Optional[Any] , conversation: "Conversation" ) -> List[int]:
        """Flatten a conversation into token ids, truncated to the model max length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            # Keep only the most recent tokens.
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
lowercase__ = 1.6_0_2_1e-1_9 # units = C
# BUGFIX: the solver below referenced `ELECTRON_CHARGE`, which did not exist;
# bind the elementary charge to that name (keeping `lowercase__` as well).
ELECTRON_CHARGE = lowercase__


def UpperCamelCase( conductivity: float , electron_conc: float , mobility: float , ):
    """Solve sigma = q * n * mu for whichever of the three quantities is given as 0.

    Exactly one argument must be 0; returns a ``(name, value)`` tuple for it.

    Raises:
        ValueError: if not exactly one value is 0, or any value is negative.
    """
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError('You cannot supply more or less than 2 values' )
    elif conductivity < 0:
        raise ValueError('Conductivity cannot be negative' )
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative' )
    elif mobility < 0:
        raise ValueError('mobility cannot be negative' )
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
    '''Speech-to-text tool wrapping the openai/whisper-base checkpoint.'''

    # BUGFIX: restored the attribute/method names the PipelineTool machinery reads
    # (the originals all shared one obfuscated name and shadowed each other).
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]

    def encode( self : Tuple , audio: str ) -> Optional[int]:
        """Turn raw audio into model-ready input features."""
        return self.pre_processor(audio , return_tensors='pt' ).input_features

    def forward( self : Tuple , inputs: int ) -> List[str]:
        """Run generation on the encoded audio features."""
        return self.model.generate(inputs=inputs )

    def decode( self : str , outputs: List[Any] ) -> List[str]:
        """Decode generated token ids back to a transcript string.

        BUGFIX: `skip_special_tokens` was being passed the outputs tensor; it
        must be the boolean True.
        """
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
| 695 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
# Known fairseq hub checkpoint names / local xsum path accepted by the converter.
lowercase__ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
# Which HF architecture each hub checkpoint maps to.
lowercase__ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
# Sample sentence used to sanity-check tokenizer agreement.
lowercase__ = " Hello world! cécé herlolip"
# (fairseq key, HF key) pairs for the MNLI classification head.
# NOTE(review): each assignment rebinds `lowercase__`, clobbering the previous
# constant; the conversion code appears to expect distinct names (e.g.
# SAMPLE_TEXT, mnli_rename_keys) — confirm the intended bindings.
lowercase__ = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def UpperCamelCase( state_dict ):
    """Drop fairseq bookkeeping entries that have no HF equivalent (modifies in place).

    BUGFIX: the loop popped the state dict itself as a key; it must pop each
    ignore key, tolerating absent keys.
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def UpperCamelCase( dct , old_key , new_key ):
    """Move ``dct[old_key]`` to ``dct[new_key]`` (modifies in place).

    BUGFIX: the three parameters shared one name (a SyntaxError) and the popped
    value was dropped instead of being stored under the new key.
    """
    val = dct.pop(old_key )
    dct[new_key] = val
def UpperCamelCase( checkpoint_path ):
    """Load a local xsum checkpoint into a fairseq `bart.large.cnn` hub interface.

    BUGFIX: the loaded state dict and hub interface were assigned to throwaway
    locals, so the later references were undefined.
    """
    sd = torch.load(checkpoint_path , map_location='cpu' )
    hub_interface = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
    hub_interface.model.load_state_dict(sd['model'] )
    return hub_interface
def UpperCamelCase( emb ):
    """Build a bias-free Linear layer that shares the embedding's weight tensor.

    BUGFIX: the Linear's dimensions and bias flag were being passed the embedding
    object itself; they must come from the embedding's weight shape.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Tie weights: reuse the embedding matrix directly.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def UpperCamelCase( checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    """Convert a fairseq BART checkpoint into the HF transformers layout and save it.

    Args:
        checkpoint_path: fairseq hub name (e.g. ``bart.large``) or a local model.pt path.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        hf_checkpoint_name: HF config name; derived from ``checkpoint_path`` if None.

    BUGFIX: the parameters shared one name (a SyntaxError) and nearly every local
    binding had been lost; names restored per the upstream conversion script.
    NOTE(review): `SAMPLE_TEXT` and `mnli_rename_keys` are expected to be the
    module-level constants defined above — confirm their bindings.
    """
    if not os.path.exists(checkpoint_path ):
        # Hub checkpoint name: let fairseq download it.
        bart = torch.hub.load('pytorch/fairseq' , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.' , '-' )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    # Sanity check: fairseq and HF tokenizers must agree on the sample sentence.
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors='pt' ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict('mnli' , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , 'lm_head' ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    lowercase__ = argparse.ArgumentParser()
    # BUGFIX: the parser/args were only bound to `lowercase__`; named handles added.
    parser = lowercase__
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    lowercase__ = parser.parse_args()
    args = lowercase__
    # BUGFIX: the converter in this module is bound to `UpperCamelCase`.
    UpperCamelCase(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
lowercase__ = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
# Token ids that never correspond to speech for the multilingual tokenizer
# (punctuation/symbol tokens suppressed during generation).
# NOTE(review): each assignment rebinds `lowercase__`, clobbering the previous
# constant — the original module kept these under distinct names; confirm.
lowercase__ = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
# Same list for the English-only tokenizer's vocabulary.
lowercase__ = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
    '''Configuration class for Whisper encoder-decoder speech models.'''

    # BUGFIX: restored the attribute names the PretrainedConfig machinery reads
    # (the originals all shared one obfuscated name and shadowed each other).
    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self : str , vocab_size: Any = 51_865 , num_mel_bins: List[Any] = 80 , encoder_layers: int = 6 , encoder_attention_heads: Dict = 4 , decoder_layers: List[Any] = 6 , decoder_attention_heads: Any = 4 , decoder_ffn_dim: Tuple = 1_536 , encoder_ffn_dim: Tuple = 1_536 , encoder_layerdrop: Tuple = 0.0 , decoder_layerdrop: Optional[int] = 0.0 , decoder_start_token_id: List[Any] = 50_257 , use_cache: Optional[int] = True , is_encoder_decoder: Any = True , activation_function: str = "gelu" , d_model: List[str] = 256 , dropout: str = 0.0 , attention_dropout: Any = 0.0 , activation_dropout: Tuple = 0.0 , init_std: Dict = 0.02 , scale_embedding: Optional[int] = False , max_source_positions: Union[str, Any] = 1_500 , max_target_positions: List[Any] = 448 , pad_token_id: int = 50_256 , bos_token_id: Union[str, Any] = 50_256 , eos_token_id: List[Any] = 50_256 , suppress_tokens: Tuple = None , begin_suppress_tokens: Optional[Any] = [220, 50_256] , use_weighted_layer_sum: Tuple = False , classifier_proj_size: str = 256 , apply_spec_augment: Optional[Any] = False , mask_time_prob: List[Any] = 0.05 , mask_time_length: Any = 10 , mask_time_min_masks: Optional[Any] = 2 , mask_feature_prob: Optional[int] = 0.0 , mask_feature_length: Optional[int] = 10 , mask_feature_min_masks: int = 0 , median_filter_width: Optional[int] = 7 , **kwargs: Union[str, Any] , ) -> List[str]:
        """Store the model hyper-parameters and forward the token-id/seq2seq options
        to the PretrainedConfig base class.

        BUGFIX: all parameters shared the name `lowercase_` (a SyntaxError) and
        every assignment had lost its `self.` target; names restored in order
        from the assignment sequence.
        """
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class A_ ( _snake_case ):
    '''ONNX export configuration for Whisper (seq2seq, with optional past key values).'''

    @property
    def inputs( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs.

        BUGFIX: the decoder-input entries were assigned to throwaway locals; they
        must be stored under `common_inputs['decoder_input_ids']`. Method renamed
        to `inputs`, the property the OnnxConfig machinery reads.
        """
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            # With cached past key values the decoder sees one new token per step.
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs

    def generate_dummy_inputs( self : Optional[Any] , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , sampling_rate: int = 22_050 , time_duration: float = 5.0 , frequency: int = 220 , ) -> Mapping[str, Any]:
        """Build dummy encoder audio features and decoder token ids for export tracing.

        BUGFIX: the parameters all shared the name `lowercase_` (a SyntaxError)
        and the assembled entries were never stored in the returned mapping.
        """
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        # With past key values the decoder sequence is half the encoder length.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs

    @property
    def atol_for_validation( self : Dict ) -> float:
        """Tolerance used when validating the exported model against PyTorch."""
        return 1E-3
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
# BUGFIX: the class body uses `Generic[T]` / `deque[T]`, so the TypeVar must be
# reachable under the name `T` as well.
T = lowercase__


class A_ ( Generic[T] ):
    '''Least-recently-used cache backed by a deque; the most recent key sits at the left end.'''

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self : List[Any] , lowercase_ : int ) -> None:
        """Create a cache holding at most ``lowercase_`` keys (0 means unbounded).

        BUGFIX: the stores were assigned to throwaway locals instead of ``self``,
        and the capacity was never written back to the class.
        """
        self.dq_store = deque()
        self.key_reference = set()
        if not lowercase_:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif lowercase_ < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = lowercase_

    def refer( self : List[str] , lowercase_ : T ) -> None:
        """Touch key ``lowercase_``: make it most-recent, evicting the LRU key when full.

        BUGFIX: the body referenced an undefined ``x`` and removed the touched key
        from ``key_reference`` instead of the evicted one.
        """
        if lowercase_ not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            # Already cached: move it to the most-recent position.
            self.dq_store.remove(lowercase_ )
        self.dq_store.appendleft(lowercase_ )
        self.key_reference.add(lowercase_ )

    def display( self : Dict ) -> None:
        """Print cached keys from most- to least-recently used."""
        for k in self.dq_store:
            print(k )

    # Backward-compatible alias: both methods were named `UpperCAmelCase_` in the
    # obfuscated original, where the last binding (display) won.
    UpperCAmelCase_ = display

    def __repr__( self : Union[str, Any] ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""


# Named alias used inside the methods above and by the `__main__` demo below.
LRUCache = A_
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: exercise the cache, print its MRU->LRU contents, and check the final order.
    # NOTE(review): as written the class above is named `A_` with obfuscated method
    # names, so `LRUCache`, `.refer` and `.display` below are unresolved — confirm
    # the intended public names before running this module directly.
    lowercase__ = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Write a minimal single-machine Accelerate config file.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8" (case-insensitive).
        save_location: path of the JSON config file to create.
        use_xpu: whether to prefer XPU devices when CUDA is unavailable.

    Returns:
        The config ``Path`` on success, ``False`` if a config already exists
        at ``save_location`` (never overwritten).

    Raises:
        ValueError: if ``mixed_precision`` is not a supported value.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Device detection priority: CUDA, then XPU (opt-in), then NPU, else CPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # Pure CPU fallback: a single local process.
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """Register the `default` subcommand on ``parser`` and return the subparser."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    # Dispatched by the top-level CLI once arguments are parsed.
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """Entry point for `accelerate config default`: write the config and report where it went."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    # write_basic_config returns False when a config already exists; stay silent then.
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""")
| 695 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Route log records to stdout; the level comes from $LOGLEVEL (default INFO).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
# Checkpoints supported by this exporter, mapped to their model/tokenizer classes.
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    """Parse the command-line options for the BART → ONNX export script."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the model and tokenizer registered for ``model_name`` and move the model to ``device``."""
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        # Neutralize generation constraints that the scripted beam search does not support.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export ``model`` (with scripted beam search) to ONNX and check ONNX Runtime output against PyTorch.

    Raises an AssertionError (via numpy testing) if the two backends disagree
    beyond rtol/atol 1e-3.
    """
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        # Reference generation with PyTorch, compared against the ONNX run below.
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        # Shrink the graph by de-duplicating initializers before loading it in ORT.
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    """CLI entry point: load the model, then export it to ONNX and validate the export."""
    args = parse_args()
    # Defaults used when the corresponding CLI flags are not provided.
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
    main()
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias of ``LayoutLMvaImageProcessor``.

    Kept so old imports keep working; instantiation emits a ``FutureWarning``
    pointing users at the replacement class.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 695 | 1 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 reference ellipsoid: equatorial (semi-major) and polar (semi-minor) radii, in metres.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the geodesic distance in metres between two points on the
    WGS-84 ellipsoid using Lambert's formula.

    Args:
        lat1, lon1: first point, in degrees.
        lat2, lon2: second point, in degrees.

    Returns:
        Distance in metres.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single HANS example.

    ``text_a``/``text_b`` form the premise/hypothesis pair; ``pairID`` is the
    dataset-assigned example identifier (see ``HansProcessor._create_examples``).
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """Tokenized features for one HANS example, as consumed by the model.

    ``label`` is the integer class index; ``pairID`` is the numeric example id.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set (reads the heuristics TSV files)."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into ``InputExample``s (row 0 is the header)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Strip the "ex" prefix from pair ids like "ex123".
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Tokenize ``examples`` into ``InputFeatures``.

    Args:
        examples: list of ``InputExample``.
        label_list: ordered label strings; their index becomes the class id.
        max_length: padding/truncation length for the tokenizer.
        tokenizer: a ``PreTrainedTokenizer``.

    Returns:
        List of ``InputFeatures`` in the same order as ``examples``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        # Unknown labels fall back to class 0.
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
# Number of classes per HANS task, and the processor that loads each task.
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
| 695 | 1 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used by Message.post/post_reply; the bot token comes from CI secrets.
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse a pytest summary line into (failed, success, time_spent).

    ``test_results`` looks like ``"== 2 failed, 3 passed in 10s =="`` (short
    output) or the same without the ``==`` framing when the line is long.
    """
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        # Counts precede their keyword, e.g. "2 failed," / "3 passed".
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message.

    Scans pytest's ``failures_short`` report: a ``_ [doctest]`` header names the
    file (third space-separated token), and the first subsequent line that does
    not start with a line number is taken as the error.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    """Builds and posts the Slack report for the daily doc-test CI run."""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        # Keep only the first duration token (e.g. "04:12:32," -> "04:12:32").
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by post(); post_reply() refuses to run until then.
        self.thread_ts = None

    @property
    def time(self) -> str:
        """Total run time rendered as '{h}h{m}m{s}s'."""
        time_spent = [self._time_spent]
        total_secs = 0
        for duration in time_spent:
            time_parts = duration.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3_600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        # NOTE: reads the module-level `doc_test_results` dict built in __main__,
        # not self.doc_test_results (matches the original read at this spot).
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out() -> Dict:
        """Post a generic 'tests did not run' message to the daily channel."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        # `payload` is already a Python list, so dump it directly (the previous
        # code passed it through json.loads, which only accepts strings).
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self) -> str:
        """Post the top-level report and remember its thread timestamp."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text) -> str:
        """Build the Slack blocks for one category's failure details."""
        failures_text = ""
        for key, value in failures.items():
            # Keep replies small: Slack rejects very long section texts.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing category under the main message."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        # Drop the aggregate keys so only per-category dicts remain.
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                # Be gentle with the Slack API.
                time.sleep(1)
def get_job_links():
    """Return {job name: job html_url} for the current GitHub Actions run.

    Pages through the GitHub API (100 jobs per page); returns {} on any error.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # First page already fetched above; fetch the remaining ones.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name):
    """Read every file in the artifact directory ``name``.

    Returns {file stem: file contents}; an empty dict if the directory is missing.

    Raises:
        ValueError: if a file is not valid UTF-8 text.
    """
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    # Key by the file name without its extension, e.g. "stats.txt" -> "stats".
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    """Scan the current working directory for artifact folders.

    Every sub-directory is treated as one artifact; returns
    {artifact name: Artifact} where each Artifact records its path(s).
    """

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    # File patterns to report on, mapped to their display category.
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failed"] = failed
        doc_test_results["success"] = success
        # Strip the surrounding brackets/quotes from the duration token.
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                # "path::test" for module tests, a bare path for doctest files.
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 695 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Project Euler 57: count expansions of sqrt(2) whose numerator has more
    digits than its denominator, over the first ``n`` continued-fraction terms.

    Each step maps num/den -> (num + 2*den)/(num + den), starting from 1/1.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 695 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet: per-down-block residuals and the mid-block residual.

    NOTE(review): field names reconstructed from the diffusers API; the source
    dump had both fields mangled to the same identifier — confirm against callers.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embeds the conditioning image into the UNet's latent resolution.

    A conv stem followed by pairs of (same-resolution conv, stride-2 downsampling
    conv) per entry in ``block_out_channels``, with SiLU between convolutions,
    and a zero-initialized output projection.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            # Stride-2 conv halves the spatial resolution while widening channels.
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks
        # Zero init so the ControlNet starts as a no-op on the base model.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class A_ ( nn.Module , _snake_case , _snake_case ):
    """ControlNet-style conditional UNet encoder in Flax, registered via
    ``@flax_register_to_config``.

    Owns a conv stem, timestep projection/embedding, a conditioning-image embedder,
    down blocks mirrored by zero-initialised 1x1 "controlnet" convs, and a
    cross-attention mid block; ``__call__`` is written to return per-resolution
    residuals plus a mid-block residual (optionally wrapped in FlaxControlNetOutput).

    NOTE(review): the method bodies are mechanically corrupted — results are bound
    to the throwaway local ``UpperCAmelCase`` while later lines read names that are
    never defined (``params_rng``, ``dropout_rng``, ``num_attention_heads``,
    ``down_blocks``, ``controlnet_down_blocks``, ``timesteps``, ``sample``,
    ``train`` ...), and ``__call__`` declares several parameters with the same
    name ``lowercase_`` (a SyntaxError). The original names must be restored.
    """
    # Flax module attributes acting as the model configuration.
    UpperCAmelCase_ : int = 32
    UpperCAmelCase_ : int = 4
    UpperCAmelCase_ : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    UpperCAmelCase_ : Union[bool, Tuple[bool]] = False
    UpperCAmelCase_ : Tuple[int] = (320, 640, 1_280, 1_280)
    UpperCAmelCase_ : int = 2
    UpperCAmelCase_ : Union[int, Tuple[int]] = 8
    UpperCAmelCase_ : Optional[Union[int, Tuple[int]]] = None
    UpperCAmelCase_ : int = 1_280
    UpperCAmelCase_ : float = 0.0
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : jnp.dtype = jnp.floataa
    UpperCAmelCase_ : bool = True
    UpperCAmelCase_ : int = 0
    UpperCAmelCase_ : str = "rgb"
    UpperCAmelCase_ : Tuple[int] = (16, 32, 96, 256)
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : jax.random.KeyArray ) -> FrozenDict:
        # Weight-initialisation helper: builds dummy sample / timestep / encoder-state /
        # conditioning tensors and calls ``self.init`` to obtain the params FrozenDict.
        # init input tensors
        UpperCAmelCase : Tuple = (1, self.in_channels, self.sample_size, self.sample_size)
        UpperCAmelCase : Tuple = jnp.zeros(lowercase_ , dtype=jnp.floataa )
        UpperCAmelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
        UpperCAmelCase : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        UpperCAmelCase : Optional[int] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        UpperCAmelCase : str = jnp.zeros(lowercase_ , dtype=jnp.floataa )
        UpperCAmelCase , UpperCAmelCase : Tuple = jax.random.split(lowercase_ )
        # NOTE(review): ``params_rng`` / ``dropout_rng`` are never defined — corruption.
        UpperCAmelCase : Dict = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )["params"]
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # setup-style layer construction.
        UpperCAmelCase : Any = self.block_out_channels
        UpperCAmelCase : Tuple = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        UpperCAmelCase : List[Any] = self.num_attention_heads or self.attention_head_dim
        # input
        UpperCAmelCase : Optional[Any] = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        UpperCAmelCase : List[Any] = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        UpperCAmelCase : Optional[int] = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype )
        UpperCAmelCase : int = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        UpperCAmelCase : List[Any] = self.only_cross_attention
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase : Optional[Any] = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase : List[Any] = (num_attention_heads,) * len(self.down_block_types )
        # down
        UpperCAmelCase : Union[str, Any] = []
        UpperCAmelCase : Optional[Any] = []
        UpperCAmelCase : Optional[int] = block_out_channels[0]
        # Zero-initialised 1x1 conv mirroring each down-block residual.
        UpperCAmelCase : str = nn.Conv(
            lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(lowercase_ )
        for i, down_block_type in enumerate(self.down_block_types ):
            UpperCAmelCase : Union[str, Any] = output_channel
            UpperCAmelCase : Union[str, Any] = block_out_channels[i]
            UpperCAmelCase : Dict = i == len(lowercase_ ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                UpperCAmelCase : str = FlaxCrossAttnDownBlockaD(
                    in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                UpperCAmelCase : Tuple = FlaxDownBlockaD(
                    in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(lowercase_ )
            for _ in range(self.layers_per_block ):
                UpperCAmelCase : Any = nn.Conv(
                    lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(lowercase_ )
            if not is_final_block:
                UpperCAmelCase : Any = nn.Conv(
                    lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(lowercase_ )
        UpperCAmelCase : int = down_blocks
        UpperCAmelCase : Dict = controlnet_down_blocks
        # mid
        UpperCAmelCase : Union[str, Any] = block_out_channels[-1]
        UpperCAmelCase : Any = FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowercase_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        UpperCAmelCase : Optional[Any] = nn.Conv(
            lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : float = 1.0 , lowercase_ : bool = True , lowercase_ : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        # NOTE(review): duplicated parameter names ``lowercase_`` make this def a
        # SyntaxError; bodies below also read undefined names — see class docstring.
        UpperCAmelCase : str = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Flip the channel axis when the conditioning image is BGR-ordered.
            UpperCAmelCase : Any = jnp.flip(lowercase_ , axis=1 )
        # 1. time
        if not isinstance(lowercase_ , jnp.ndarray ):
            UpperCAmelCase : Any = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(lowercase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            UpperCAmelCase : Optional[Any] = timesteps.astype(dtype=jnp.floataa )
            UpperCAmelCase : Any = jnp.expand_dims(lowercase_ , 0 )
        UpperCAmelCase : Any = self.time_proj(lowercase_ )
        UpperCAmelCase : List[Any] = self.time_embedding(lowercase_ )
        # 2. pre-process
        UpperCAmelCase : Dict = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
        UpperCAmelCase : Optional[int] = self.conv_in(lowercase_ )
        UpperCAmelCase : Optional[int] = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
        UpperCAmelCase : Any = self.controlnet_cond_embedding(lowercase_ )
        sample += controlnet_cond
        # 3. down
        UpperCAmelCase : List[str] = (sample,)
        for down_block in self.down_blocks:
            if isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase , UpperCAmelCase : int = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
            else:
                UpperCAmelCase , UpperCAmelCase : Union[str, Any] = down_block(lowercase_ , lowercase_ , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        UpperCAmelCase : Tuple = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
        # 5. contronet blocks
        UpperCAmelCase : Optional[int] = ()
        for down_block_res_sample, controlnet_block in zip(lowercase_ , self.controlnet_down_blocks ):
            UpperCAmelCase : Tuple = controlnet_block(lowercase_ )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        UpperCAmelCase : Union[str, Any] = controlnet_down_block_res_samples
        UpperCAmelCase : List[str] = self.controlnet_mid_block(lowercase_ )
        # 6. scaling
        UpperCAmelCase : Dict = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowercase_ , mid_block_res_sample=lowercase_ )
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
    """Return the sum of the decimal digits of ``2 ** UpperCAmelCase_`` (Project Euler 16).

    Fixes applied: the body previously read the undefined names ``power``, ``n``
    and ``r`` (every result was bound to a throwaway local instead), and the
    ``__main__`` guard called an undefined ``solution`` — names are now consistent.

    Args:
        UpperCAmelCase_: the exponent (defaults to 1000, the Euler-problem value).

    Returns:
        int: digit sum of ``2 ** UpperCAmelCase_``.
    """
    n = 2**UpperCAmelCase_
    r = 0
    # Peel off the last decimal digit of n each iteration, accumulating into r.
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(UpperCamelCase(int(str(input()).strip())))
| 695 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class A_ ( _snake_case , _snake_case ):
    """FiLM-conditioned T5-style decoder (continuous inputs -> spectrogram frames):
    a conditioning MLP over a timestep embedding, token/position embeddings, a stack
    of ``DecoderLayer`` modules, a final norm, dropout and a linear ``spec_out`` head.

    NOTE(review): the bodies are mechanically corrupted — layers are bound to the
    throwaway local ``UpperCAmelCase`` while later lines read undefined names
    (``d_model``, ``self.decoders``, ``query_input``, ``decoder_input_tokens`` ...),
    and several defs repeat the parameter name ``lowercase_`` (a SyntaxError).
    The original attribute/parameter names must be restored.
    """
    @register_to_config
    def __init__( self : int , lowercase_ : int = 128 , lowercase_ : int = 256 , lowercase_ : float = 2000.0 , lowercase_ : int = 768 , lowercase_ : int = 12 , lowercase_ : int = 12 , lowercase_ : int = 64 , lowercase_ : int = 2_048 , lowercase_ : float = 0.1 , ) -> Any:
        super().__init__()
        # Conditioning MLP: Linear -> SiLU -> Linear -> SiLU (no biases).
        UpperCAmelCase : Optional[int] = nn.Sequential(
            nn.Linear(lowercase_ , d_model * 4 , bias=lowercase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase_ ) , nn.SiLU() , )
        UpperCAmelCase : Tuple = nn.Embedding(lowercase_ , lowercase_ )
        UpperCAmelCase : int = False
        UpperCAmelCase : Optional[Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
        UpperCAmelCase : str = nn.Dropout(p=lowercase_ )
        UpperCAmelCase : Optional[int] = nn.ModuleList()
        for lyr_num in range(lowercase_ ):
            # FiLM conditional T5 decoder
            UpperCAmelCase : List[Any] = DecoderLayer(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ )
            self.decoders.append(lowercase_ )
        UpperCAmelCase : Tuple = TaLayerNorm(lowercase_ )
        UpperCAmelCase : Union[str, Any] = nn.Dropout(p=lowercase_ )
        UpperCAmelCase : Optional[int] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Tuple ) -> Union[str, Any]:
        # Builds a broadcastable cross-attention mask as the outer product of
        # query and key masks (reads undefined ``query_input``/``key_input``/``mask``).
        UpperCAmelCase : Any = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def UpperCAmelCase_ ( self : str , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple ) -> Union[str, Any]:
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        UpperCAmelCase : str = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        UpperCAmelCase : str = self.conditioning_emb(lowercase_ ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        UpperCAmelCase : Optional[int] = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        UpperCAmelCase : List[str] = torch.broadcast_to(
            torch.arange(lowercase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        UpperCAmelCase : str = self.position_encoding(lowercase_ )
        UpperCAmelCase : Optional[Any] = self.continuous_inputs_projection(lowercase_ )
        inputs += position_encodings
        UpperCAmelCase : Optional[Any] = self.dropout(lowercase_ )
        # decoder: No padding present.
        UpperCAmelCase : Tuple = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        UpperCAmelCase : List[Any] = [(x, self.encoder_decoder_mask(lowercase_ , lowercase_ )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        UpperCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        UpperCAmelCase : Any = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            UpperCAmelCase : Optional[int] = lyr(
                lowercase_ , conditioning_emb=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )[0]
        UpperCAmelCase : int = self.decoder_norm(lowercase_ )
        UpperCAmelCase : int = self.post_dropout(lowercase_ )
        UpperCAmelCase : Union[str, Any] = self.spec_out(lowercase_ )
        return spec_out
class A_ ( nn.Module ):
    """Single FiLM-conditioned T5 decoder layer: [0] conditional self-attention,
    [1] cross-attention, [-1] FiLM-conditioned feed-forward, stored in ``self.layer``.

    NOTE(review): ``__init__`` binds the ModuleList to the local ``UpperCAmelCase``
    but then appends to the never-assigned ``self.layer``; the forward-style method
    reads undefined names (``encoder_hidden_states``, ``hidden_states``) and both
    defs repeat the parameter name ``lowercase_`` (a SyntaxError). Corrupted.
    """
    def __init__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str]=1E-6 ) -> str:
        super().__init__()
        UpperCAmelCase : List[str] = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ ) )
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[Any]=None , lowercase_ : Dict=None , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Tuple=None , ) -> str:
        UpperCAmelCase : int = self.layer[0](
            lowercase_ , conditioning_emb=lowercase_ , attention_mask=lowercase_ , )
        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask into additive attention bias (-1e10 for masked).
            UpperCAmelCase : Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            UpperCAmelCase : str = self.layer[1](
                lowercase_ , key_value_states=lowercase_ , attention_mask=lowercase_ , )
        # Apply Film Conditional Feed Forward layer
        UpperCAmelCase : List[str] = self.layer[-1](lowercase_ , lowercase_ )
        return (hidden_states,)
class A_ ( nn.Module ):
    """FiLM-conditioned self-attention block: layer-norm, optional FiLM modulation
    by a conditioning embedding, self-attention, then a dropout residual add.

    NOTE(review): layers are bound to the throwaway local ``UpperCAmelCase`` instead
    of the ``self.layer_norm`` / ``self.FiLMLayer`` / ``self.attention`` /
    ``self.dropout`` attributes the forward method reads, undefined names such as
    ``d_model`` / ``conditioning_emb`` / ``hidden_states`` are referenced, and
    duplicated ``lowercase_`` parameters make the defs SyntaxErrors. Corrupted.
    """
    def __init__( self : str , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Optional[int] ) -> List[str]:
        super().__init__()
        UpperCAmelCase : Optional[Any] = TaLayerNorm(lowercase_ )
        UpperCAmelCase : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_ )
        UpperCAmelCase : Optional[int] = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_ )
        UpperCAmelCase : int = nn.Dropout(lowercase_ )
    def UpperCAmelCase_ ( self : str , lowercase_ : Any , lowercase_ : List[str]=None , lowercase_ : Dict=None , ) -> List[Any]:
        # pre_self_attention_layer_norm
        UpperCAmelCase : List[Any] = self.layer_norm(lowercase_ )
        if conditioning_emb is not None:
            UpperCAmelCase : Optional[int] = self.FiLMLayer(lowercase_ , lowercase_ )
        # Self-attention block
        UpperCAmelCase : Any = self.attention(lowercase_ )
        UpperCAmelCase : Optional[Any] = hidden_states + self.dropout(lowercase_ )
        return hidden_states
class A_ ( nn.Module ):
    """Cross-attention block: layer-norm, attention over encoder key/value states,
    then a dropout residual add.

    NOTE(review): same corruption pattern as the sibling blocks — attributes bound
    to the local ``UpperCAmelCase``, undefined names (``attention_mask``,
    ``hidden_states``, ``layer_output``) read later, duplicated ``lowercase_``
    parameters (SyntaxError). Restore the original names.
    """
    def __init__( self : Optional[Any] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : str ) -> Optional[Any]:
        super().__init__()
        UpperCAmelCase : int = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_ )
        UpperCAmelCase : Dict = TaLayerNorm(lowercase_ , eps=lowercase_ )
        UpperCAmelCase : int = nn.Dropout(lowercase_ )
    def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str]=None , lowercase_ : str=None , ) -> Union[str, Any]:
        UpperCAmelCase : int = self.layer_norm(lowercase_ )
        # Mask is squeezed from (b, 1, q, k) to (b, q, k) before attention.
        UpperCAmelCase : str = self.attention(
            lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=attention_mask.squeeze(1 ) , )
        UpperCAmelCase : List[str] = hidden_states + self.dropout(lowercase_ )
        return layer_output
class A_ ( nn.Module ):
    """FiLM-conditioned feed-forward block: layer-norm, optional FiLM modulation,
    gated dense MLP, then a dropout residual add.

    NOTE(review): same corruption pattern — attributes bound to the local
    ``UpperCAmelCase`` while the forward method reads ``self.film`` /
    ``self.DenseReluDense`` / undefined ``conditioning_emb`` / ``hidden_states``;
    duplicated ``lowercase_`` parameters (SyntaxError). Restore the names.
    """
    def __init__( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] ) -> List[str]:
        super().__init__()
        UpperCAmelCase : str = TaDenseGatedActDense(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ )
        UpperCAmelCase : List[str] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_ )
        UpperCAmelCase : Union[str, Any] = TaLayerNorm(lowercase_ , eps=lowercase_ )
        UpperCAmelCase : int = nn.Dropout(lowercase_ )
    def UpperCAmelCase_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Any=None ) -> Any:
        UpperCAmelCase : int = self.layer_norm(lowercase_ )
        if conditioning_emb is not None:
            UpperCAmelCase : Optional[int] = self.film(lowercase_ , lowercase_ )
        UpperCAmelCase : List[Any] = self.DenseReluDense(lowercase_ )
        UpperCAmelCase : List[str] = hidden_states + self.dropout(lowercase_ )
        return hidden_states
class A_ ( nn.Module ):
    """T5-style gated-GELU dense block: two parallel input projections (gate via
    GELU, linear passthrough), elementwise product, dropout, output projection.

    NOTE(review): same corruption pattern — layers bound to the local
    ``UpperCAmelCase`` while the forward method reads ``self.wi_a`` / ``self.act`` /
    ``self.dropout`` / ``self.wo`` and undefined ``hidden_gelu`` etc.; duplicated
    ``lowercase_`` parameters (SyntaxError). Note both forward lines call the same
    ``self.wi_a`` — the original presumably used distinct ``wi_0``/``wi_1``.
    """
    def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str ) -> List[Any]:
        super().__init__()
        UpperCAmelCase : Union[str, Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
        UpperCAmelCase : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
        UpperCAmelCase : Tuple = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
        UpperCAmelCase : Tuple = nn.Dropout(lowercase_ )
        UpperCAmelCase : Dict = NewGELUActivation()
    def UpperCAmelCase_ ( self : int , lowercase_ : str ) -> Optional[Any]:
        UpperCAmelCase : str = self.act(self.wi_a(lowercase_ ) )
        UpperCAmelCase : Tuple = self.wi_a(lowercase_ )
        UpperCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
        UpperCAmelCase : List[Any] = self.dropout(lowercase_ )
        UpperCAmelCase : Union[str, Any] = self.wo(lowercase_ )
        return hidden_states
class A_ ( nn.Module ):
    """T5-style RMS layer norm: scales by rsqrt of the mean square (no mean
    subtraction, no bias), computed in fp32 for half-precision inputs.

    NOTE(review): ``__init__`` binds the weight to the local ``UpperCAmelCase``
    and reads the undefined ``eps`` (duplicated ``lowercase_`` params, a
    SyntaxError); the forward method reads undefined ``hidden_states`` /
    ``variance`` names. Restore ``self.weight`` / ``self.variance_epsilon``.
    """
    def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int]=1E-6 ) -> Optional[int]:
        super().__init__()
        UpperCAmelCase : int = nn.Parameter(torch.ones(lowercase_ ) )
        UpperCAmelCase : Dict = eps
    def UpperCAmelCase_ ( self : Dict , lowercase_ : List[Any] ) -> List[str]:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        UpperCAmelCase : Optional[Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowercase_ )
        UpperCAmelCase : Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.floataa, torch.bfloataa]:
            UpperCAmelCase : Optional[Any] = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class A_ ( nn.Module ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : torch.Tensor ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(lowercase_ , 3.0 )) ))
class A_ ( nn.Module ):
    """FiLM layer: projects a conditioning embedding to per-feature (scale, shift)
    and applies ``x * (1 + scale) + shift``.

    NOTE(review): ``__init__`` reads the undefined ``out_features`` and binds the
    projection to the local ``UpperCAmelCase`` instead of ``self.scale_bias`` which
    the forward method calls; the forward method reads undefined ``scale`` /
    ``shift`` / ``x``; duplicated ``lowercase_`` parameters (SyntaxError). Corrupted.
    """
    def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> Optional[int]:
        super().__init__()
        UpperCAmelCase : List[str] = nn.Linear(lowercase_ , out_features * 2 , bias=lowercase_ )
    def UpperCAmelCase_ ( self : int , lowercase_ : List[Any] , lowercase_ : List[Any] ) -> Any:
        UpperCAmelCase : int = self.scale_bias(lowercase_ )
        # Split the projection into (scale, shift) halves along the last dim.
        UpperCAmelCase , UpperCAmelCase : List[str] = torch.chunk(lowercase_ , 2 , -1 )
        UpperCAmelCase : Optional[Any] = x * (1 + scale) + shift
        return x
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE(review): ``lowercase__`` is bound to the module logger and then immediately
# rebound to the config-URL map, so the logger handle is lost while the classes
# below call ``logger.warning`` on a name never defined in this chunk — the
# original names (``logger``, the ...PRETRAINED_CONFIG... map) appear mangled.
lowercase__ = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config.json URL.
lowercase__ = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration class for the BLIP-2 vision tower (``model_type``
    ``"blip_2_vision_model"``), plus a ``from_pretrained``-style classmethod that
    extracts the nested ``vision_config`` when loading from a composite
    ``blip-2`` config dict.

    NOTE(review): ``__init__`` binds every value to the throwaway local
    ``UpperCAmelCase`` instead of the corresponding ``self.<attr>``, its parameter
    name ``lowercase_`` is duplicated many times (a SyntaxError), and the
    classmethod reads the undefined ``config_dict``. Restore the original names.
    """
    UpperCAmelCase_ : str = """blip_2_vision_model"""
    def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        super().__init__(**lowercase_ )
        UpperCAmelCase : Optional[int] = hidden_size
        UpperCAmelCase : List[str] = intermediate_size
        UpperCAmelCase : List[Any] = num_hidden_layers
        UpperCAmelCase : Any = num_attention_heads
        UpperCAmelCase : str = patch_size
        UpperCAmelCase : Union[str, Any] = image_size
        UpperCAmelCase : List[Any] = initializer_range
        UpperCAmelCase : str = attention_dropout
        UpperCAmelCase : str = layer_norm_eps
        UpperCAmelCase : List[str] = hidden_act
        UpperCAmelCase : str = qkv_bias
    @classmethod
    def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            UpperCAmelCase : Dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Configuration class for the BLIP-2 Q-Former (``model_type``
    ``"blip_2_qformer"``), plus a classmethod that extracts the nested
    ``qformer_config`` when loading from a composite ``blip-2`` config dict.

    NOTE(review): same corruption pattern as the vision config above — values
    bound to the local ``UpperCAmelCase`` instead of ``self.<attr>``, duplicated
    ``lowercase_`` parameters (a SyntaxError), and undefined ``config_dict`` read
    in the classmethod. Restore the original names.
    """
    UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
    def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
        super().__init__(pad_token_id=lowercase_ , **lowercase_ )
        UpperCAmelCase : int = vocab_size
        UpperCAmelCase : Union[str, Any] = hidden_size
        UpperCAmelCase : List[str] = num_hidden_layers
        UpperCAmelCase : str = num_attention_heads
        UpperCAmelCase : List[Any] = hidden_act
        UpperCAmelCase : Union[str, Any] = intermediate_size
        UpperCAmelCase : Optional[int] = hidden_dropout_prob
        UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase : Tuple = max_position_embeddings
        UpperCAmelCase : Optional[Any] = initializer_range
        UpperCAmelCase : Any = layer_norm_eps
        UpperCAmelCase : Dict = position_embedding_type
        UpperCAmelCase : Any = cross_attention_frequency
        UpperCAmelCase : Any = encoder_hidden_size
    @classmethod
    def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            UpperCAmelCase : Dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Composite BLIP-2 configuration (``model_type`` ``"blip-2"``) holding nested
    vision / Q-Former / text configs, a ``from_*_configs``-style builder classmethod,
    and a ``to_dict`` that serialises the nested configs.

    NOTE(review): ``__init__`` binds the nested configs to the throwaway local
    ``UpperCAmelCase`` instead of ``self.vision_config`` / ``self.qformer_config`` /
    ``self.text_config`` that the rest of the class reads, and ``lowercase_`` is
    duplicated in the parameter list (a SyntaxError). ``to_dict`` likewise binds
    every entry to ``UpperCAmelCase`` and reads the undefined ``output``. Corrupted.
    """
    UpperCAmelCase_ : Optional[int] = """blip-2"""
    UpperCAmelCase_ : Any = True
    def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
        super().__init__(**lowercase_ )
        if vision_config is None:
            UpperCAmelCase : Union[str, Any] = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            UpperCAmelCase : int = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            UpperCAmelCase : Dict = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
        UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
        # Text backbone defaults to OPT when no model_type is given.
        UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
        UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
        UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
        UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
        UpperCAmelCase : Tuple = num_query_tokens
        UpperCAmelCase : Tuple = self.vision_config.hidden_size
        UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        UpperCAmelCase : Union[str, Any] = 1.0
        UpperCAmelCase : Union[str, Any] = 0.02
    @classmethod
    def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
        # Alternate constructor from already-built sub-configs.
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
        UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
        UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
        UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
        UpperCAmelCase : List[str] = self.text_config.to_dict()
        UpperCAmelCase : str = self.__class__.model_type
        return output
| 695 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()


def UpperCamelCase( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    """Convert a TensorFlow Funnel checkpoint into a PyTorch checkpoint.

    Fixes applied: the original signature repeated the parameter name
    ``UpperCAmelCase_`` four times (a SyntaxError), the body read the undefined
    names ``config`` / ``model``, and the ``__main__`` guard used undefined
    ``parser`` / ``args`` and called an undefined
    ``convert_tf_checkpoint_to_pytorch`` — names are restored consistently.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to convert.
        config_file: JSON config file describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.
        base_model: if truthy, build ``FunnelBaseModel`` (no decoder)
            instead of ``FunnelModel``.
    """
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    UpperCamelCase(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Path to the FSMT validation fixture with per-language-pair src/tgt sentences.
# NOTE(review): ``io.open(filename, ...)`` reads a name that is never defined (the
# path above is bound to ``lowercase__``), and the loaded JSON rebinds
# ``lowercase__`` while the test class below reads ``bleu_data`` — the original
# variable names (``filename``, ``bleu_data``) appear mangled.
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test scoring facebook/wmt19-* FSMT checkpoints with BLEU
    on a small validation batch, parameterised over four language pairs with
    per-pair minimum-score thresholds.

    NOTE(review): the helper methods were both renamed to ``UpperCAmelCase_`` (the
    second shadows the first) while the test body calls ``self.get_tokenizer`` /
    ``self.get_model``; the test body binds results to the local ``UpperCAmelCase``
    yet reads undefined names (``pair``, ``tokenizer``, ``model``, ``batch``,
    ``scores`` ...), and its def duplicates ``lowercase_`` (a SyntaxError). Corrupted.
    """
    def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
        return FSMTTokenizer.from_pretrained(lowercase_ )
    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
        UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
        if torch_device == "cuda":
            # Half precision only on GPU.
            model.half()
        return model
    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ] )
    @slow
    def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
        UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
        UpperCAmelCase : int = self.get_model(lowercase_ )
        UpperCAmelCase : List[Any] = bleu_data[pair]['src']
        UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
        UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
        UpperCAmelCase : List[Any] = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        UpperCAmelCase : List[Any] = tokenizer.batch_decode(
            lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
        UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
        print(lowercase_ )
        self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# NOTE(review): the module logger is bound to ``lowercase__`` while the class
# below calls ``logger.warning`` — the original name was presumably ``logger``.
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    """CLIP-based safety checker with two linear heads over the CLIP vision
    projection: one scoring NSFW content, one scoring watermarks; flagged images
    are replaced with black (zeros) arrays under ``torch.no_grad``.

    NOTE(review): ``__init__`` binds the submodules to the throwaway local
    ``UpperCAmelCase`` instead of ``self.vision_model`` / ``self.p_head`` /
    ``self.w_head`` that the forward method reads (and reads the undefined
    ``config``); the forward method duplicates ``lowercase_`` parameters (a
    SyntaxError) and reads undefined names (``nsfw_detected``, ``images`` ...).
    """
    UpperCAmelCase_ : List[Any] = CLIPConfig
    UpperCAmelCase_ : int = ["""CLIPEncoderLayer"""]
    def __init__( self : Optional[int] , lowercase_ : CLIPConfig ) -> Tuple:
        super().__init__(lowercase_ )
        UpperCAmelCase : List[str] = CLIPVisionModelWithProjection(config.vision_config )
        UpperCAmelCase : str = nn.Linear(config.vision_config.projection_dim , 1 )
        UpperCAmelCase : int = nn.Linear(config.vision_config.projection_dim , 1 )
    @torch.no_grad()
    def UpperCAmelCase_ ( self : str , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : int=0.5 , lowercase_ : Union[str, Any]=0.5 ) -> str:
        UpperCAmelCase : int = self.vision_model(lowercase_ )[0]
        # NSFW head: score -> flatten -> threshold -> python list of bools.
        UpperCAmelCase : List[str] = self.p_head(lowercase_ )
        UpperCAmelCase : Any = nsfw_detected.flatten()
        UpperCAmelCase : Union[str, Any] = nsfw_detected > p_threshold
        UpperCAmelCase : int = nsfw_detected.tolist()
        if any(lowercase_ ):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )
        for idx, nsfw_detected_ in enumerate(lowercase_ ):
            if nsfw_detected_:
                # Replace the flagged image with a black image of the same shape.
                UpperCAmelCase : List[str] = np.zeros(images[idx].shape )
        # Watermark head: same pipeline as the NSFW head.
        UpperCAmelCase : Tuple = self.w_head(lowercase_ )
        UpperCAmelCase : Union[str, Any] = watermark_detected.flatten()
        UpperCAmelCase : Any = watermark_detected > w_threshold
        UpperCAmelCase : str = watermark_detected.tolist()
        if any(lowercase_ ):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )
        for idx, watermark_detected_ in enumerate(lowercase_ ):
            if watermark_detected_:
                UpperCAmelCase : Dict = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
    """Configuration for the Pix2Struct text (decoder) model.

    NOTE(review): mechanically renamed code — ``__init__`` declares the same
    parameter name ``lowercase_`` many times (a SyntaxError) and the body
    reads names such as ``vocab_size`` that are never bound. Confirm against
    the upstream ``Pix2StructTextConfig``.
    """
    # `model_type` string, keys ignored at inference, and attribute aliases
    # consumed by the auto-config machinery.
    UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
    UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
    UpperCAmelCase_ : Optional[int] = {
        """hidden_size""": """hidden_size""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
        # Store the hyper-parameters on the config instance before delegating
        # the token / decoder bookkeeping to the PretrainedConfig base class.
        UpperCAmelCase : Optional[Any] = vocab_size
        UpperCAmelCase : int = hidden_size
        UpperCAmelCase : List[Any] = d_kv
        UpperCAmelCase : Any = d_ff
        UpperCAmelCase : List[str] = num_layers
        UpperCAmelCase : str = num_heads
        UpperCAmelCase : List[Any] = relative_attention_num_buckets
        UpperCAmelCase : Tuple = relative_attention_max_distance
        UpperCAmelCase : str = dropout_rate
        UpperCAmelCase : Optional[int] = layer_norm_epsilon
        UpperCAmelCase : int = initializer_factor
        UpperCAmelCase : Union[str, Any] = use_cache
        UpperCAmelCase : List[Any] = eos_token_id
        UpperCAmelCase : Union[str, Any] = decoder_start_token_id
        # for backwards compatibility
        UpperCAmelCase : List[str] = dense_act_fn
        super().__init__(
            pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
    @classmethod
    def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
        """Load this text config from a checkpoint, unwrapping the nested
        ``text_config`` when the checkpoint holds a composite pix2struct config."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            UpperCAmelCase : Any = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Configuration for the Pix2Struct vision encoder.

    NOTE(review): mechanically renamed code — ``__init__`` declares
    ``lowercase_`` repeatedly (a SyntaxError) and the body reads names such
    as ``hidden_size`` that are never bound. Confirm against the upstream
    ``Pix2StructVisionConfig``.
    """
    UpperCAmelCase_ : int = """pix2struct_vision_model"""
    def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
        super().__init__(**lowercase_ )
        # Record the encoder hyper-parameters on the config instance.
        UpperCAmelCase : Any = hidden_size
        UpperCAmelCase : Any = patch_embed_hidden_size
        UpperCAmelCase : Optional[int] = d_ff
        UpperCAmelCase : Dict = dropout_rate
        UpperCAmelCase : Dict = num_hidden_layers
        UpperCAmelCase : List[Any] = num_attention_heads
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : str = initializer_factor
        UpperCAmelCase : str = attention_dropout
        UpperCAmelCase : str = layer_norm_eps
        UpperCAmelCase : Union[str, Any] = dense_act_fn
        UpperCAmelCase : Dict = seq_len
        UpperCAmelCase : Optional[int] = relative_attention_num_buckets
        UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
        UpperCAmelCase : str = d_kv
    @classmethod
    def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
        """Load this vision config from a checkpoint, unwrapping the nested
        ``vision_config`` when the checkpoint holds a composite pix2struct config."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            UpperCAmelCase : List[str] = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Composite Pix2Struct configuration: bundles a text config and a vision
    config and mirrors the decoder/pad/eos token ids at the top level.

    NOTE(review): mechanically renamed code — ``__init__`` declares
    ``lowercase_`` repeatedly (a SyntaxError) and the body reads
    ``text_config`` / ``vision_config`` / ``output`` that are never bound.
    """
    UpperCAmelCase_ : Optional[int] = """pix2struct"""
    UpperCAmelCase_ : Dict = True
    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
        super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            UpperCAmelCase : Optional[int] = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            UpperCAmelCase : List[str] = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
        UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
        # Mirror the decoder's special token ids on the composite config.
        UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
        UpperCAmelCase : str = self.text_config.pad_token_id
        UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
        UpperCAmelCase : Union[str, Any] = initializer_factor
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : int = self.initializer_range
        UpperCAmelCase : int = self.initializer_range
        UpperCAmelCase : str = is_vqa
    @classmethod
    def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
        """Alternate constructor: build the composite config from the two sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
    def UpperCAmelCase_ ( self : Any ) -> Tuple:
        """Serialize to a plain dict, expanding both nested sub-configs."""
        UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
        UpperCAmelCase : Optional[int] = self.text_config.to_dict()
        UpperCAmelCase : Dict = self.vision_config.to_dict()
        UpperCAmelCase : Optional[Any] = self.__class__.model_type
        return output
| 695 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class A_ ( _snake_case ):
    """Deprecated MCTCT processor: couples an ``MCTCTFeatureExtractor`` (audio)
    with an ``AutoTokenizer`` (text) behind a single ``__call__``/``pad`` API.

    NOTE(review): mechanically renamed code — locals are assigned to the
    throwaway name ``UpperCAmelCase`` while later lines read ``kwargs`` /
    ``args`` / ``audio`` / ``text`` etc., and the double ``lowercase_``
    parameters in ``__init__`` are a SyntaxError. Confirm against the
    upstream ``MCTCTProcessor``.
    """
    # Component class names resolved by ProcessorMixin.
    UpperCAmelCase_ : Union[str, Any] = """MCTCTFeatureExtractor"""
    UpperCAmelCase_ : Optional[Any] = """AutoTokenizer"""
    def __init__( self : int , lowercase_ : str , lowercase_ : str ) -> Any:
        super().__init__(lowercase_ , lowercase_ )
        # Default "current" processor is the feature extractor; swapped to the
        # tokenizer while inside the as_target_processor() context below.
        UpperCAmelCase : Union[str, Any] = self.feature_extractor
        UpperCAmelCase : Optional[Any] = False
    def __call__( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : Tuple ) -> Optional[int]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*lowercase_ , **lowercase_ )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            UpperCAmelCase : Union[str, Any] = kwargs.pop('raw_speech' )
        else:
            UpperCAmelCase : Dict = kwargs.pop('audio' , lowercase_ )
            UpperCAmelCase : List[str] = kwargs.pop('sampling_rate' , lowercase_ )
            UpperCAmelCase : List[Any] = kwargs.pop('text' , lowercase_ )
        if len(lowercase_ ) > 0:
            UpperCAmelCase : Optional[Any] = args[0]
            UpperCAmelCase : Optional[Any] = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        # Route audio to the feature extractor and text to the tokenizer,
        # merging the tokenizer ids in as `labels` when both are present.
        if audio is not None:
            UpperCAmelCase : Tuple = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
        if text is not None:
            UpperCAmelCase : Tuple = self.tokenizer(lowercase_ , **lowercase_ )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            UpperCAmelCase : Optional[int] = encodings['input_ids']
            return inputs
    def UpperCAmelCase_ ( self : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> int:
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def UpperCAmelCase_ ( self : Any , *lowercase_ : Tuple , **lowercase_ : List[Any] ) -> List[Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*lowercase_ , **lowercase_ )
        UpperCAmelCase : Dict = kwargs.pop('input_features' , lowercase_ )
        UpperCAmelCase : Any = kwargs.pop('labels' , lowercase_ )
        if len(lowercase_ ) > 0:
            UpperCAmelCase : int = args[0]
            UpperCAmelCase : Union[str, Any] = args[1:]
        # Pad audio features with the feature extractor and labels with the
        # tokenizer; merge when both are present.
        if input_features is not None:
            UpperCAmelCase : List[Any] = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
        if labels is not None:
            UpperCAmelCase : List[Any] = self.tokenizer.pad(lowercase_ , **lowercase_ )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            UpperCAmelCase : Union[str, Any] = labels['input_ids']
            return input_features
    def UpperCAmelCase_ ( self : Tuple , *lowercase_ : List[str] , **lowercase_ : Optional[int] ) -> List[Any]:
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
    @contextmanager
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
        """Deprecated context manager that temporarily makes the tokenizer the
        "current" processor for label preparation."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        UpperCAmelCase : str = True
        UpperCAmelCase : str = self.tokenizer
        yield
        # Restore the feature extractor once the caller leaves the context.
        UpperCAmelCase : List[str] = self.feature_extractor
        UpperCAmelCase : Any = False
| 695 |
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
    """Encode a text string to bytes via the ``baseaa`` module.

    Bug fix: the body previously referenced an undefined name ``string``
    instead of the function's parameter, so every call raised ``NameError``.
    The argument is now encoded to UTF-8 bytes and passed to the encoder.
    """
    return baseaa.baaencode(UpperCAmelCase_.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
    """Decode bytes produced by the ``baseaa`` encoder back to a UTF-8 string."""
    decoded_bytes = baseaa.baadecode(UpperCAmelCase_ )
    return decoded_bytes.decode('utf-8' )
if __name__ == "__main__":
    # Round-trip demo: encode a sample string, print it, decode it back.
    # NOTE(review): `baseaa_encode` / `baseaa_decode` are not defined anywhere
    # in this module (both functions above are named `UpperCamelCase`, the
    # second shadowing the first), and `test` / `encoded` / `decoded` are
    # never bound (assignments go to `lowercase__`) — this block raises
    # NameError as written; confirm the intended names.
    lowercase__ = "Hello World!"
    lowercase__ = baseaa_encode(test)
    print(encoded)
    lowercase__ = baseaa_decode(encoded)
    print(decoded)
| 695 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast (CPU-sized) smoke test for the KarrasVe unconditional pipeline.

    NOTE(review): mechanically renamed code — locals are assigned to
    ``UpperCAmelCase`` while later lines read ``model`` / ``pipe`` /
    ``image`` etc.; confirm against the upstream diffusers test.
    """
    @property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Tiny deterministic UNet fixture (seeded so weights are reproducible).
        torch.manual_seed(0 )
        UpperCAmelCase : Any = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # Run the pipeline twice (dict and tuple return paths) with the same
        # seed and check both against a hard-coded slice of expected pixels.
        UpperCAmelCase : Dict = self.dummy_uncond_unet
        UpperCAmelCase : Dict = KarrasVeScheduler()
        UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: full 256x256 KarrasVe generation against a
    pretrained NCSN++ CelebA-HQ checkpoint, checked on a pixel slice.

    NOTE(review): mechanically renamed code — locals are assigned to
    ``UpperCAmelCase`` while later lines read ``pipe`` / ``image``.
    """
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
        UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
        UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
        UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        # Fixed seed so the expected pixel slice below is reproducible.
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( point_a , point_b ):
    """Return the Euclidean distance between two equal-length point sequences.

    Bug fix: the original signature declared the same parameter name twice
    (``UpperCAmelCase_``, a SyntaxError in Python) and consequently zipped
    the argument with itself; the two points are now distinct parameters.
    Coordinates are paired with ``zip`` (truncating to the shorter input).

    >>> UpperCamelCase([0, 0], [3, 4])
    5.0
    """
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(point_a , point_b ) ) )
def UpperCamelCase( dataset , value_array ):
    """For each query vector in ``value_array``, find its nearest neighbour in
    ``dataset`` by Euclidean distance.

    Returns a list with one ``[nearest_vector_as_list, distance]`` pair per
    query. Raises ValueError when the arrays' dimensions or row widths
    disagree, and TypeError on shape/dtype mismatches.

    Bug fixes vs. the original:
    - the signature declared the same parameter name twice (a SyntaxError);
    - every local was assigned to the throwaway name ``UpperCAmelCase`` while
      later lines read ``dist`` / ``temp_dist`` / ``vector`` / ``answer``
      (NameError at runtime);
    - the distance helper it called (``euclidean``) is not defined under that
      name in this module, so an equivalent local helper is provided.
    """
    def _euclidean(point_a , point_b ):
        # Euclidean distance over zipped (truncating) coordinate pairs.
        return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(point_a , point_b ) ) )

    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        # 1-D inputs have no second axis; equal ndim was already enforced above.
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        # Seed the search with the first dataset entry, then keep the closest.
        dist = _euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = _euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def UpperCamelCase( input_a , input_b ):
    """Return the cosine similarity of two vectors: dot(a, b) / (|a| * |b|).

    Bug fix: the original signature declared the same parameter name twice
    (``UpperCAmelCase_``, a SyntaxError in Python), so the expression compared
    the argument with itself; the two vectors are now distinct parameters.
    """
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 695 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ = logging.get_logger("transformers.models.encodec")
lowercase__ = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
lowercase__ = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
lowercase__ = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
lowercase__ = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
lowercase__ = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    """Walk ``key``'s dotted path into the HF model and copy ``value`` into the
    attribute selected by ``weight_type``, after a shape sanity check.

    NOTE(review): mechanically renamed code — the signature declares the same
    parameter name five times (a SyntaxError in Python), the body reads names
    (``key``, ``value``, ``weight_type``, ``full_name``, ``hf_pointer``,
    ``hf_shape``) that are never bound, and the per-weight-type attribute
    assignments were collapsed to the throwaway local ``UpperCAmelCase``.
    Presumably each branch originally assigned to an attribute of the resolved
    module (e.g. ``hf_pointer.weight_g.data = value``) — confirm against the
    upstream transformers EnCodec conversion script.
    """
    # Resolve the dotted attribute path one component at a time.
    for attribute in key.split('.' ):
        UpperCAmelCase : str = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
    if weight_type is not None:
        UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
    else:
        UpperCAmelCase : int = hf_pointer.shape
    # Refuse to copy tensors whose shapes disagree with the target parameter.
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    # Dispatch on weight_type: conv/linear weights & biases, batch-norm running
    # statistics, and the per-layer LSTM weight/bias tensors.
    if weight_type == "weight":
        UpperCAmelCase : Optional[Any] = value
    elif weight_type == "weight_g":
        UpperCAmelCase : List[str] = value
    elif weight_type == "weight_v":
        UpperCAmelCase : Optional[Any] = value
    elif weight_type == "bias":
        UpperCAmelCase : Optional[Any] = value
    elif weight_type == "running_mean":
        UpperCAmelCase : Dict = value
    elif weight_type == "running_var":
        UpperCAmelCase : List[str] = value
    elif weight_type == "num_batches_tracked":
        UpperCAmelCase : List[Any] = value
    elif weight_type == "weight_ih_l0":
        UpperCAmelCase : Any = value
    elif weight_type == "weight_hh_l0":
        UpperCAmelCase : Optional[int] = value
    elif weight_type == "bias_ih_l0":
        UpperCAmelCase : List[str] = value
    elif weight_type == "bias_hh_l0":
        UpperCAmelCase : List[Any] = value
    elif weight_type == "weight_ih_l1":
        UpperCAmelCase : Optional[Any] = value
    elif weight_type == "weight_hh_l1":
        UpperCAmelCase : int = value
    elif weight_type == "bias_ih_l1":
        UpperCAmelCase : Tuple = value
    elif weight_type == "bias_hh_l1":
        UpperCAmelCase : str = value
    else:
        UpperCAmelCase : Dict = value
    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def UpperCamelCase( name , ignore_keys ):
    """Return True when ``name`` matches any pattern in ``ignore_keys``.

    Pattern forms:
    - a trailing ``.*`` matches by prefix (``name.startswith(key[:-1])``);
    - an embedded ``.*.`` matches when both the prefix and the suffix occur
      anywhere in ``name``;
    - otherwise a plain substring test is used.

    Bug fixes vs. the original: the signature declared the same parameter
    name twice (a SyntaxError in Python), and the ``prefix, suffix``
    unpacking was assigned to throwaway names while the next line read
    ``prefix`` / ``suffix`` (NameError).
    """
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    """Map every tensor of an original EnCodec state dict onto the HF model,
    selecting the 24kHz/32kHz or 48kHz key mapping by ``model_name``.

    NOTE(review): mechanically renamed code — the signature declares the same
    parameter name three times (a SyntaxError), and locals such as
    ``unused_weights`` / ``is_used`` / ``weight_type`` are read but only ever
    assigned via the throwaway name ``UpperCAmelCase``. Confirm against the
    upstream transformers EnCodec conversion script.
    """
    UpperCAmelCase : Dict = []
    # NOTE(review): this condition is always true — a non-empty string literal
    # after `or` is truthy, so `model_name == "encodec_24khz" or "encodec_32khz"`
    # never falls through to the 48kHz branch. Presumably it was meant to be
    # `model_name in ("encodec_24khz", "encodec_32khz")`.
    if model_name == "encodec_24khz" or "encodec_32khz":
        UpperCAmelCase : str = MAPPING_24K
    elif model_name == "encodec_48khz":
        UpperCAmelCase : List[Any] = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""" )
    for name, value in orig_dict.items():
        if should_ignore(UpperCAmelCase_ , UpperCAmelCase_ ):
            logger.info(f"""{name} was ignored""" )
            continue
        UpperCAmelCase : int = False
        for key, mapped_key in MAPPING.items():
            # Wildcard keys ('prefix.*.suffix') match when both halves occur in the name.
            if "*" in key:
                UpperCAmelCase , UpperCAmelCase : Optional[int] = key.split('.*.' )
                if prefix in name and suffix in name:
                    UpperCAmelCase : List[Any] = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                UpperCAmelCase : Union[str, Any] = True
                if "*" in mapped_key:
                    UpperCAmelCase : Optional[int] = name.split(UpperCAmelCase_ )[0].split('.' )[-2]
                    UpperCAmelCase : Optional[int] = mapped_key.replace('*' , UpperCAmelCase_ )
                # Classify which parameter of the target module this tensor is:
                # weight-norm components, LSTM layer weights/biases, plain
                # weight/bias, or batch-norm running statistics.
                if "weight_g" in name:
                    UpperCAmelCase : List[Any] = 'weight_g'
                elif "weight_v" in name:
                    UpperCAmelCase : Union[str, Any] = 'weight_v'
                elif "weight_ih_l0" in name:
                    UpperCAmelCase : Any = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    UpperCAmelCase : Union[str, Any] = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    UpperCAmelCase : Union[str, Any] = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    UpperCAmelCase : List[Any] = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    UpperCAmelCase : Union[str, Any] = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    UpperCAmelCase : List[Any] = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    UpperCAmelCase : List[Any] = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    UpperCAmelCase : Union[str, Any] = 'bias_hh_l1'
                elif "bias" in name:
                    UpperCAmelCase : Optional[int] = 'bias'
                elif "weight" in name:
                    UpperCAmelCase : Union[str, Any] = 'weight'
                elif "running_mean" in name:
                    UpperCAmelCase : Any = 'running_mean'
                elif "running_var" in name:
                    UpperCAmelCase : Optional[int] = 'running_var'
                elif "num_batches_tracked" in name:
                    UpperCAmelCase : Dict = 'num_batches_tracked'
                else:
                    UpperCAmelCase : Tuple = None
                set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
            continue
        if not is_used:
            unused_weights.append(UpperCAmelCase_ )
    logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
    """Convert an original EnCodec checkpoint into an HF ``EncodecModel`` +
    ``EncodecFeatureExtractor``, save them, and optionally push to the Hub.

    NOTE(review): mechanically renamed code — the signature declares the same
    parameter name five times (a SyntaxError), and the per-model config
    overrides below are assigned to the throwaway local ``UpperCAmelCase``
    instead of attributes of ``config``. Confirm against the upstream
    transformers EnCodec conversion script.
    """
    # Start from a user-supplied config, or the library default.
    if config_path is not None:
        UpperCAmelCase : Dict = EncodecConfig.from_pretrained(UpperCAmelCase_ )
    else:
        UpperCAmelCase : List[Any] = EncodecConfig()
    # Apply the checkpoint-specific hyper-parameters.
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        UpperCAmelCase : str = [8, 5, 4, 4]
        UpperCAmelCase : str = [2.2]
        UpperCAmelCase : List[Any] = 64
        UpperCAmelCase : Tuple = 3_20_00
        UpperCAmelCase : Tuple = 20_48
        UpperCAmelCase : Any = False
        UpperCAmelCase : List[str] = False
        UpperCAmelCase : Optional[Any] = False
    elif model_name == "encodec_48khz":
        UpperCAmelCase : List[Any] = [8, 5, 4, 2]
        UpperCAmelCase : Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
        UpperCAmelCase : Dict = 4_80_00
        UpperCAmelCase : str = 2
        UpperCAmelCase : Dict = False
        UpperCAmelCase : List[str] = 'time_group_norm'
        UpperCAmelCase : Any = True
        UpperCAmelCase : Any = 1.0
        UpperCAmelCase : List[Any] = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""" )
    UpperCAmelCase : Dict = EncodecModel(UpperCAmelCase_ )
    UpperCAmelCase : Optional[Any] = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(UpperCAmelCase_ )
    UpperCAmelCase : str = torch.load(UpperCAmelCase_ )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        UpperCAmelCase : Dict = original_checkpoint['best_state']
    recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    model.save_pretrained(UpperCAmelCase_ )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(UpperCAmelCase_ )
        model.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
    # CLI entry point for the EnCodec conversion.
    # NOTE(review): `parser` / `args` are never bound (assignments go to
    # `lowercase__`) and `convert_checkpoint` is not defined under that name
    # in this module (the function above is `UpperCamelCase`), so this block
    # raises NameError as written.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    lowercase__ = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Any , lowercase_ : Union[str, Any]=13 , lowercase_ : int=7 , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[int]=True , lowercase_ : int=False , lowercase_ : List[Any]=True , lowercase_ : List[str]=33 , lowercase_ : Tuple=32 , lowercase_ : Dict=5 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=37 , lowercase_ : Any="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : List[str]=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=None , ) -> Optional[Any]:
UpperCAmelCase : Any = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : List[str] = seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : str = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Optional[Any] = num_labels
UpperCAmelCase : int = num_choices
UpperCAmelCase : Any = scope
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> Any:
UpperCAmelCase : Tuple = EsmModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : Dict = model(lowercase_ , attention_mask=lowercase_ )
UpperCAmelCase : Dict = model(lowercase_ )
UpperCAmelCase : Tuple = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[str] ) -> Any:
UpperCAmelCase : List[Any] = EsmForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = EsmForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Dict ) -> Any:
UpperCAmelCase : int = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : str = config_and_inputs
UpperCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : int = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : Tuple = ()
UpperCAmelCase_ : Any = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : List[str] = True
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = EsmModelTester(self )
UpperCAmelCase : Any = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : str = type
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> List[str]:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : int ) -> List[str]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[int] = EsmModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase : Union[str, Any] = EsmEmbeddings(config=lowercase_ )
UpperCAmelCase : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
UpperCAmelCase : List[str] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
UpperCAmelCase : Dict = create_position_ids_from_input_ids(lowercase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_ , lowercase_ ) ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase : Optional[int] = EsmEmbeddings(config=lowercase_ )
UpperCAmelCase : List[Any] = torch.empty(2 , 4 , 30 )
UpperCAmelCase : Optional[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
UpperCAmelCase : Optional[Any] = torch.as_tensor([expected_single_positions, expected_single_positions] )
UpperCAmelCase : int = embeddings.create_position_ids_from_inputs_embeds(lowercase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_ , lowercase_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
pass
@require_torch
class A_ ( _snake_case ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
with torch.no_grad():
UpperCAmelCase : List[str] = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
UpperCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase : List[Any] = model(lowercase_ )[0]
UpperCAmelCase : Union[str, Any] = 33
UpperCAmelCase : Optional[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase : List[Any] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
with torch.no_grad():
UpperCAmelCase : List[Any] = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
UpperCAmelCase : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase : Tuple = model(lowercase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase : List[str] = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCAmelCase : List[str] = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(UpperCAmelCase_ ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Dict , *lowercase_ : Optional[int] , lowercase_ : Tuple=None , lowercase_ : List[str]=None , **lowercase_ : Tuple ) -> Dict:
super().__init__(*lowercase_ , **lowercase_ )
UpperCAmelCase : Tuple = eval_examples
UpperCAmelCase : Dict = post_process_function
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Optional[Dataset] = None , lowercase_ : Any=None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "eval" , **lowercase_ : int , ) -> Dict[str, float]:
UpperCAmelCase : Tuple = gen_kwargs.copy()
UpperCAmelCase : int = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
UpperCAmelCase : Tuple = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
UpperCAmelCase : Optional[Any] = gen_kwargs
UpperCAmelCase : str = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase : Optional[Any] = self.get_eval_dataloader(lowercase_ )
UpperCAmelCase : Dict = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase : Dict = self.compute_metrics
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Tuple = time.time()
UpperCAmelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase : str = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
UpperCAmelCase : Union[str, Any] = compute_metrics
UpperCAmelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase : int = self.post_process_function(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCAmelCase : List[Any] = metrics.pop(lowercase_ )
metrics.update(output.metrics )
else:
UpperCAmelCase : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[int]=None , lowercase_ : str = "test" , **lowercase_ : int ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = gen_kwargs.copy()
UpperCAmelCase : List[Any] = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase : List[Any] = self.compute_metrics
UpperCAmelCase : Dict = None
UpperCAmelCase : str = time.time()
UpperCAmelCase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase : Optional[int] = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
UpperCAmelCase : Dict = compute_metrics
UpperCAmelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase : List[str] = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict' )
UpperCAmelCase : Tuple = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCAmelCase : Optional[int] = metrics.pop(lowercase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """llama"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
def __init__( self : Any , lowercase_ : Union[str, Any]=32_000 , lowercase_ : int=4_096 , lowercase_ : Optional[Any]=11_008 , lowercase_ : Optional[int]=32 , lowercase_ : List[Any]=32 , lowercase_ : Tuple=None , lowercase_ : str="silu" , lowercase_ : Optional[int]=2_048 , lowercase_ : Optional[int]=0.02 , lowercase_ : int=1E-6 , lowercase_ : Optional[int]=True , lowercase_ : int=0 , lowercase_ : Any=1 , lowercase_ : Any=2 , lowercase_ : List[Any]=1 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=None , **lowercase_ : Any , ) -> Optional[int]:
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[Any] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Optional[Any] = num_key_value_heads
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : str = initializer_range
UpperCAmelCase : Any = rms_norm_eps
UpperCAmelCase : Tuple = pretraining_tp
UpperCAmelCase : Optional[Any] = use_cache
UpperCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def UpperCAmelCase_ ( self : str ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
f"""got {self.rope_scaling}""" )
UpperCAmelCase : Optional[int] = self.rope_scaling.get('type' , lowercase_ )
UpperCAmelCase : Dict = self.rope_scaling.get('factor' , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def UpperCamelCase( class_info_file, repo_path="shi-labs/oneformer_demo" ):
    """Download a class-info JSON from the Hub and build the metadata dict.

    Args:
        class_info_file: Name of the JSON file inside the dataset repo. Each
            entry maps a class id to ``{"name": ..., "isthing": ...}``.
        repo_path: Hub dataset repo to download from.

    Returns:
        dict mapping class id -> class name, plus two aggregate keys:
        ``"thing_ids"`` (ids whose ``isthing`` flag is truthy, as ints) and
        ``"class_names"`` (all names in file order).
    """
    # Original signature repeated the same parameter name twice (a SyntaxError)
    # and bound every intermediate to a throwaway name while the loop and the
    # return statement read `class_info` / `metadata` / `class_names` /
    # `thing_ids` — restored here.
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)

    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))

    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


# Descriptive alias: the tester's __init__ calls `prepare_metadata(...)`.
prepare_metadata = UpperCamelCase
class A_ ( unittest.TestCase ):
    """Builds configuration, inputs and expected values for OneFormerImageProcessor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        # Original __init__ repeated one parameter name for every argument (a
        # SyntaxError) and never stored anything on `self`, although every
        # other method reads these attributes — restored here.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
        self.do_normalize = do_normalize
        # None sentinels avoid mutable default arguments; values match the originals.
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions (deliberately overrides batch_size above)
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        """Constructor kwargs for an OneFormerImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor is expected to resize to.

        Mirrors shortest-edge resizing: the short side becomes
        ``size["shortest_edge"]`` and the long side scales proportionally
        (truncated to int). With ``batched=True`` the per-image maxima of
        height and width are returned.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: (C, H, W) — TODO confirm
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        """Random model output with the right shapes for post-processing tests."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


# Descriptive alias: the test class's setUp calls `OneFormerImageProcessorTester(self)`.
OneFormerImageProcessorTester = A_
@require_torch
@require_vision
class A_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Test suite for OneFormerImageProcessor."""

    # NOTE(review): original base `_snake_case` is not defined anywhere in this
    # file; the mixin imported at the top is the evident intent — confirm.
    # The processor class under test; None when torch/vision are unavailable.
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
UpperCAmelCase : str = OneFormerImageProcessorTester(self )
@property
def UpperCAmelCase_ ( self : int ) -> Any:
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
self.assertTrue(hasattr(lowercase_ , 'ignore_index' ) )
self.assertTrue(hasattr(lowercase_ , 'class_info_file' ) )
self.assertTrue(hasattr(lowercase_ , 'num_text' ) )
self.assertTrue(hasattr(lowercase_ , 'repo_path' ) )
self.assertTrue(hasattr(lowercase_ , 'metadata' ) )
self.assertTrue(hasattr(lowercase_ , 'do_reduce_labels' ) )
    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        # NOTE(review): intentionally empty — presumably overrides an inherited
        # common test that does not apply to this processor; confirm intent.
        pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : List[str] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_ )
UpperCAmelCase : Optional[Any] = image_processor(
lowercase_ , ['semantic'] * len(lowercase_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase : Any = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.image_processing_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_ )
UpperCAmelCase : Optional[int] = image_processor(
lowercase_ , ['semantic'] * len(lowercase_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
# Initialize image_processor
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase : int = self.image_processing_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_ )
UpperCAmelCase : Any = image_processor(
lowercase_ , ['semantic'] * len(lowercase_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : Union[str, Any]=False , lowercase_ : Any=False , lowercase_ : List[Any]="np" ) -> List[str]:
UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase : Optional[Any] = self.image_processing_tester.num_labels
UpperCAmelCase : Any = None
UpperCAmelCase : Dict = None
UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ )
if with_segmentation_maps:
UpperCAmelCase : Optional[int] = num_labels
if is_instance_map:
UpperCAmelCase : Dict = list(range(lowercase_ ) ) * 2
UpperCAmelCase : int = dict(enumerate(lowercase_ ) )
UpperCAmelCase : str = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase : str = [Image.fromarray(lowercase_ ) for annotation in annotations]
UpperCAmelCase : List[str] = image_processor(
lowercase_ , ['semantic'] * len(lowercase_ ) , lowercase_ , return_tensors='pt' , instance_id_to_semantic_id=lowercase_ , pad_and_return_pixel_mask=lowercase_ , )
return inputs
def UpperCAmelCase_ ( self : List[Any] ) -> str:
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
def common(lowercase_ : Optional[int]=False , lowercase_ : List[str]=None ):
UpperCAmelCase : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowercase_ , is_instance_map=lowercase_ , segmentation_type=lowercase_ )
UpperCAmelCase : int = inputs['mask_labels']
UpperCAmelCase : Optional[Any] = inputs['class_labels']
UpperCAmelCase : Optional[int] = inputs['pixel_values']
UpperCAmelCase : Dict = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(lowercase_ , lowercase_ , lowercase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowercase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowercase_ )
common(is_instance_map=lowercase_ , segmentation_type='pil' )
common(is_instance_map=lowercase_ , segmentation_type='pil' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
UpperCAmelCase : Union[str, Any] = np.zeros((20, 50) )
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : Optional[int] = 1
UpperCAmelCase : List[str] = 1
UpperCAmelCase : List[str] = binary_mask_to_rle(lowercase_ )
self.assertEqual(len(lowercase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : Tuple = fature_extractor.post_process_semantic_segmentation(lowercase_ )
self.assertEqual(len(lowercase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase : int = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase : int = fature_extractor.post_process_semantic_segmentation(lowercase_ , target_sizes=lowercase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCAmelCase_ ( self : int ) -> Any:
UpperCAmelCase : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase : Any = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : int = image_processor.post_process_instance_segmentation(lowercase_ , threshold=0 )
self.assertTrue(len(lowercase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , lowercase_ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
UpperCAmelCase : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : Any = image_processor.post_process_panoptic_segmentation(lowercase_ , threshold=0 )
self.assertTrue(len(lowercase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , lowercase_ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    '''simple docstring'''
    # Fast CPU test for the KarrasVe pipeline with a tiny dummy UNet.
    # NOTE(review): several references below (`model`, `pipe`, `image`,
    # `image_slice`, `expected_slice`) are never assigned in the visible code —
    # the assignments all bind `UpperCAmelCase` instead.
    @property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Deterministic 32x32, 3-channel dummy UNet used by the test below.
        torch.manual_seed(0 )
        UpperCAmelCase : Any = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        # NOTE(review): `model` is undefined here — the UNet above was bound to
        # `UpperCAmelCase`.
        return model
    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # Runs 2 inference steps twice (dict and tuple return paths) and checks
        # both outputs match a hard-coded pixel slice.
        UpperCAmelCase : Dict = self.dummy_uncond_unet
        UpperCAmelCase : Dict = KarrasVeScheduler()
        UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    '''simple docstring'''
    # Slow integration test: full 256x256 KarrasVe generation with the
    # pretrained google/ncsnpp-celebahq-256 UNet, checked against a pixel slice.
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
        UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
        UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
        # NOTE(review): `pipe`, `image`, `image_slice` and `expected_slice` below
        # are never assigned in the visible code (assignments bind `UpperCAmelCase`).
        UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
lowercase__ = "docs/source/en/_toctree.yml"
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = defaultdict(UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(UpperCAmelCase_ )
UpperCAmelCase : Any = new_doc_list
UpperCAmelCase : List[str] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Any = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(UpperCAmelCase_ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
UpperCAmelCase : int = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCAmelCase_ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(UpperCAmelCase_ )
# Sort
return overview_doc
def UpperCamelCase( UpperCAmelCase_=False ):
    # Checks (and optionally rewrites) the "Schedulers" section of the toctree.
    # NOTE(review): `content`, `api_doc`, `scheduler_doc`, `diff` etc. are never
    # assigned (assignments bind `UpperCAmelCase`), `clean_doc_toc` does not exist
    # at module level, and `open(UpperCAmelCase_)` opens the overwrite *flag*
    # instead of the toctree path constant. Confirm against the original source.
    with open(UpperCAmelCase_ , encoding='utf-8' ) as f:
        UpperCAmelCase : Optional[Any] = yaml.safe_load(f.read() )
    # Get to the API doc
    UpperCAmelCase : Optional[int] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    UpperCAmelCase : List[str] = content[api_idx]['sections']
    # Then to the model doc
    UpperCAmelCase : str = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    UpperCAmelCase : int = api_doc[scheduler_idx]['sections']
    UpperCAmelCase : Tuple = clean_doc_toc(UpperCAmelCase_ )
    UpperCAmelCase : List[str] = False
    if new_scheduler_doc != scheduler_doc:
        UpperCAmelCase : int = True
        if overwrite:
            UpperCAmelCase : Optional[Any] = new_scheduler_doc
    if diff:
        if overwrite:
            UpperCAmelCase : Optional[int] = api_doc
            with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase( UpperCAmelCase_=False ):
    # Checks (and optionally rewrites) the "Pipelines" section of the toctree,
    # cleaning each sub-pipeline section and the overall pipeline list.
    # NOTE(review): as in the scheduler checker above, most local references
    # (`content`, `api_doc`, `pipeline_docs`, `diff`, ...) are never assigned,
    # and `clean_doc_toc` does not exist at module level.
    with open(UpperCAmelCase_ , encoding='utf-8' ) as f:
        UpperCAmelCase : List[str] = yaml.safe_load(f.read() )
    # Get to the API doc
    UpperCAmelCase : str = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    UpperCAmelCase : int = content[api_idx]['sections']
    # Then to the model doc
    UpperCAmelCase : Union[str, Any] = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : Union[str, Any] = api_doc[pipeline_idx]['sections']
    UpperCAmelCase : List[Any] = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            UpperCAmelCase : int = pipeline_doc['section']
            UpperCAmelCase : List[str] = clean_doc_toc(UpperCAmelCase_ )
            if overwrite:
                UpperCAmelCase : List[str] = new_sub_pipeline_doc
        new_pipeline_docs.append(UpperCAmelCase_ )
    # sort overall pipeline doc
    UpperCAmelCase : Optional[Any] = clean_doc_toc(UpperCAmelCase_ )
    if new_pipeline_docs != pipeline_docs:
        UpperCAmelCase : Any = True
        if overwrite:
            UpperCAmelCase : str = new_pipeline_docs
    if diff:
        if overwrite:
            UpperCAmelCase : Tuple = api_doc
            with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    # CLI entry point: run both toctree checkers, optionally fixing in place.
    # NOTE(review): `parser`, `args`, `check_scheduler_doc` and
    # `check_pipeline_doc` are all undefined — the assignments above bind
    # `lowercase__` and every function in this file is named `UpperCamelCase`.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    lowercase__ = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
    '''simple docstring'''
    # Configuration class for the Autoformer time-series transformer.
    # model_type and the attribute map from generic transformer names to the
    # Autoformer-specific config fields.
    UpperCAmelCase_ : Tuple = """autoformer"""
    UpperCAmelCase_ : Optional[int] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    # NOTE(review): this signature repeats the parameter name `lowercase_` for
    # every argument — a SyntaxError in Python; each parameter needs its own name.
    def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
        # time series specific configuration
        UpperCAmelCase : int = prediction_length
        UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
        UpperCAmelCase : List[Any] = distribution_output
        UpperCAmelCase : Tuple = loss
        UpperCAmelCase : Dict = input_size
        UpperCAmelCase : Dict = num_time_features
        UpperCAmelCase : Tuple = lags_sequence
        UpperCAmelCase : str = scaling
        UpperCAmelCase : Optional[int] = num_dynamic_real_features
        UpperCAmelCase : List[str] = num_static_real_features
        UpperCAmelCase : Optional[int] = num_static_categorical_features
        # cardinality / embedding_dimension must each match the number of static
        # categorical features when provided.
        if cardinality is not None and num_static_categorical_features > 0:
            if len(lowercase_ ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            UpperCAmelCase : int = cardinality
        else:
            UpperCAmelCase : Union[str, Any] = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(lowercase_ ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            UpperCAmelCase : Any = embedding_dimension
        else:
            UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        UpperCAmelCase : Dict = num_parallel_samples
        # Transformer architecture configuration
        UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
        UpperCAmelCase : List[Any] = d_model
        UpperCAmelCase : Dict = encoder_attention_heads
        UpperCAmelCase : Tuple = decoder_attention_heads
        UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
        UpperCAmelCase : str = decoder_ffn_dim
        UpperCAmelCase : str = encoder_layers
        UpperCAmelCase : Optional[Any] = decoder_layers
        UpperCAmelCase : int = dropout
        UpperCAmelCase : Any = attention_dropout
        UpperCAmelCase : Tuple = activation_dropout
        UpperCAmelCase : str = encoder_layerdrop
        UpperCAmelCase : Union[str, Any] = decoder_layerdrop
        UpperCAmelCase : Tuple = activation_function
        UpperCAmelCase : Dict = init_std
        UpperCAmelCase : Union[str, Any] = use_cache
        # Autoformer
        UpperCAmelCase : Any = label_length
        UpperCAmelCase : List[Any] = moving_average
        UpperCAmelCase : Optional[Any] = autocorrelation_factor
        super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
    @property
    def UpperCAmelCase_ ( self : List[str] ) -> int:
        # Width of the per-timestep feature vector fed to the transformer.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 695 | 1 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowercase__ = logging.getLogger(__name__)
lowercase__ = 50 # max width of layer names
lowercase__ = 70 # max width of quantizer names
def UpperCamelCase( UpperCAmelCase_ ):
    """Register quantization-aware-training CLI options on an argparse parser.

    Args:
        UpperCAmelCase_: the ``argparse.ArgumentParser`` (or compatible) to extend.
    """
    # The original bound the group to a mangled name, leaving `group` undefined;
    # option types/defaults restored from the help texts (precisions are ints,
    # --percentile / --clip-gelu are floats).
    parser = UpperCAmelCase_
    group = parser.add_argument_group('quant_trainer arguments' )
    group.add_argument('--wprec' , type=int , default=8 , help='weight precision' )
    group.add_argument('--aprec' , type=int , default=8 , help='activation precision' )
    group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
    group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
    group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
    group.add_argument('--quant-disable-keyword' , type=str , nargs='+' , help='disable quantizers by keyword' )
    group.add_argument('--quant-disable-layer-module' , type=str , help='disable quantizers by keyword under layer.' )
    group.add_argument('--quant-enable-layer-module' , type=str , help='enable quantizers by keyword under layer' )
    group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
    group.add_argument('--percentile' , default=None , type=float , help='percentile for PercentileCalibrator' )
    group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
    group.add_argument('--clip-gelu' , metavar='N' , type=float , help='clip gelu output maximum value to N' )
    group.add_argument(
        '--recalibrate-weights' , action='store_true' , help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ) , )
def UpperCamelCase( UpperCAmelCase_ ):
    """Install default QuantDescriptors on quant_nn.QuantLinear from parsed CLI args.

    Args:
        UpperCAmelCase_: parsed argparse namespace with ``calibrator``, ``percentile``,
            ``aprec``, ``wprec`` and ``quant_per_tensor`` attributes.

    Raises:
        ValueError: for an unknown ``--calibrator`` value, or when
            ``--calibrator percentile`` is used without ``--percentile``.
    """
    # The original body referenced `args` while the parameter had a mangled
    # name, and bound the chosen calibration method to a mangled local.
    args = UpperCAmelCase_
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator' )
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        # MSE calibration also needs the histogram collector.
        calib_method = 'histogram'
    else:
        raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    # Per-tensor weight scaling uses no axis; otherwise quantize per output channel.
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def UpperCamelCase( model , args , calib=False , eval=False ):
    """Apply the CLI quantization options to ``model`` (disable/enable quantizers,
    recalibrate weights, fuse QKV scales, clip GELU amax) and print a summary.

    NOTE(review): the original signature declared the same parameter name four
    times (a SyntaxError); parameters renamed to match the names the body uses.
    NOTE(review): helpers such as ``set_quantizer_by_name`` / ``recalibrate_weights``
    are referenced but not defined under those names at module level.

    Args:
        model: the model whose quantizers are configured.
        args: parsed CLI namespace produced by the argument-adding helper above.
        calib: when True, skip the disable/enable configuration (calibration run).
        eval: unused here; kept for call-site compatibility.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    logger.info('Configuring Model for Quantization' )
    logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def UpperCamelCase( UpperCAmelCase_ ):
    """Switch every TensorQuantizer submodule of the model to calibration mode.

    Quantizers with a calibrator collect statistics (quantization disabled);
    quantizers without one are disabled entirely.

    Args:
        UpperCAmelCase_: the model whose ``*_quantizer`` submodules are toggled.
    """
    model = UpperCAmelCase_
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    logger.info('Enabling Calibration' )
    for name, module in model.named_modules():
        if name.endswith('_quantizer' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(F"""{name:80}: {module}""" )
def UpperCamelCase( model , args ):
    """Finalize calibration: load collected amax values into every quantizer,
    re-enable quantization, move the model to CUDA and print a summary.

    NOTE(review): the original signature declared the same parameter name twice
    (a SyntaxError); parameters renamed to match the names the body uses.
    NOTE(review): ``print_quant_summary`` is not defined under that name at
    module level.

    Args:
        model: the calibrated model.
        args: parsed CLI namespace (``percentile`` is used for histogram calibrators).
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    logger.info('Loading calibrated amax' )
    for name, module in model.named_modules():
        if name.endswith('_quantizer' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('percentile' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def UpperCamelCase( model , args ):
    """Make q/k/v quantizers of every self-attention block share one amax scale.

    NOTE(review): the original signature declared the same parameter name twice
    (a SyntaxError); parameters renamed to match the names the body uses.

    Args:
        model: the model whose ``*.attention.self`` blocks are fused.
        args: parsed CLI namespace; ``quant_per_tensor`` also fuses weight scales.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    name_width = 50  # mirrors the module "max width of layer names" constant

    def fusea(qq , qk , qv ):
        # Replace the three amax buffers with their common maximum.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print(' WARNING: NO AMAX BUFFER' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )

    for name, mod in model.named_modules():
        if name.endswith('.attention.self' ):
            logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def UpperCamelCase( model , maxval ):
    """Clamp the input-quantizer amax of every FFN output projection to ``maxval``.

    Attention output projections (``attention.output.dense``) are excluded; only
    the GELU-fed ``*.output.dense`` layers are clipped.

    NOTE(review): the original signature declared the same parameter name twice
    (a SyntaxError); parameters renamed per usage.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    name_width = 50  # mirrors the module "max width of layer names" constant
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def UpperCamelCase( UpperCAmelCase_ ):
    """Expand each per-axis weight-quantizer amax into a per-output-channel vector.

    Args:
        UpperCAmelCase_: the model whose ``_weight_quantizer`` amax buffers are expanded.
    """
    model = UpperCAmelCase_
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            # Broadcast the (possibly scalar) amax to one entry per output channel.
            # NOTE(review): the original assignment target was mangled; restoring
            # the in-place update implied by the print below — confirm upstream.
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def UpperCamelCase( UpperCAmelCase_ ):
    """Recompute weight-quantizer amax values from the current weights.

    For each module with a ``_weight_quantizer``, reduce the weight tensor over
    every axis not in the quantization axis set and store the result as the new
    amax.

    Args:
        UpperCAmelCase_: the model to recalibrate.
    """
    model = UpperCAmelCase_
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    name_width = 50  # mirrors the module "max width of layer names" constant
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            # NOTE(review): `weight_quantizer` (no underscore) and the missing
            # f-prefix on the print below are preserved from the original.
            if not hasattr(mod.weight_quantizer , '_amax' ):
                print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            mod._weight_quantizer._amax = amax
def UpperCamelCase( model , name_width=25 , line_width=1_80 , ignore=None ):
    """Log one line per weighted layer describing its input/weight quantizers.

    NOTE(review): the original signature declared the same parameter name four
    times (a SyntaxError); parameters renamed per usage.

    Args:
        model: model to summarize.
        name_width: initial layer-name column width (recomputed from actual names).
        line_width: lines longer than this are split across two log lines.
        ignore: type, string, or list of types/strings of layers to skip.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    # First pass: size the name column to the longest weighted-layer name.
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '_input_quantizer' , None )
        weight_q = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F"""Act:{input_q.extra_repr()}"""
        wgt_str = F"""Wgt:{weight_q.extra_repr()}"""
        s = F"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F"""{name:{name_width}} {act_str}""" )
            logger.info(F"""{' ':{name_width}} {wgt_str}""" )
def UpperCamelCase( UpperCAmelCase_ ):
    """Print every TensorQuantizer submodule of the model and the total count.

    Args:
        UpperCAmelCase_: the model to scan.
    """
    model = UpperCAmelCase_
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F"""{name:80} {mod}""" )
            count += 1
    print(F"""{count} TensorQuantizers found in model""" )
def UpperCamelCase( name , mod , quantizer , k , v ):
    """Set attribute ``k`` to ``v`` on the quantizer ``quantizer`` of module ``mod``.

    Logs a warning (using ``name`` for context) when the module has no such
    quantizer attribute.

    NOTE(review): the original signature declared the same parameter name five
    times (a SyntaxError); parameters renamed per usage.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        # The attribute must already exist on the quantizer; creating new ones
        # silently would hide typos in the CLI keyword options.
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F"""{name} has no {quantizer}""" )
def UpperCamelCase( name , mod , which="both" , **kwargs ):
    """Apply each keyword option to the input and/or weight quantizer of ``mod``.

    NOTE(review): the original signature declared the same parameter name twice
    (a SyntaxError); parameters renamed per usage.
    NOTE(review): ``set_quantizer`` is not defined under that name at module
    level, and the ``k, v`` passed below are the leftovers of the loop — this
    mirrors the original structure, which only handles a single kwarg correctly.

    Args:
        name: layer name, used for logging.
        mod: the module whose quantizers are modified.
        which: "input", "weight" or "both".
        **kwargs: quantizer attribute/value pairs to set.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    qname_width = 70  # mirrors the module "max width of quantizer names" constant
    s = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += F""" {k}={v}"""
    if which in ["input", "both"]:
        set_quantizer(name , mod , '_input_quantizer' , k , v )
    if which in ["weight", "both"]:
        set_quantizer(name , mod , '_weight_quantizer' , k , v )
    logger.info(s )
def UpperCamelCase( model , names , **kwargs ):
    """Apply quantizer options to every module whose name matches any regex in ``names``.

    Modules that own quantizers are delegated to the pairwise setter; standalone
    ``*_quantizer`` modules get the attributes set directly.

    NOTE(review): the original signature declared the same parameter name twice
    (a SyntaxError); parameters renamed per usage. ``set_quantizers`` is not
    defined under that name at module level.

    Args:
        model: model to scan.
        names: regex patterns matched against module names via ``re.search``.
        **kwargs: quantizer attribute/value pairs to set.
    """
    logger = logging.getLogger(__name__)  # module logger is bound to a mangled name
    name_width = 50  # mirrors the module "max width of layer names" constant
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    s = F"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += F""" {k}={v}"""
                        setattr(mod , k , v )
                    logger.info(s )
| 695 |
'''simple docstring'''
def UpperCamelCase( sentence , ngram_size ):
    """Return all contiguous n-grams of length ``ngram_size`` from ``sentence``.

    Works on any sliceable sequence (strings, lists, ...). Returns an empty
    list when the sequence is shorter than ``ngram_size``.

    NOTE(review): the original declared the same parameter name twice (a
    SyntaxError); parameters renamed to the names the body already referenced.

    >>> UpperCamelCase("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1 )]
if __name__ == "__main__":
    # Run the docstring examples in this module as tests.
    from doctest import testmod
    testmod()
| 695 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase__ = datasets.utils.logging.get_logger(__name__)
class A_ ( folder_based_builder.FolderBasedBuilderConfig ):
    '''simple docstring'''
    # Builder config for the audio folder dataset; both flags default to None
    # (unset) so the base builder can apply its own defaults.
    UpperCAmelCase_ : bool = None
    UpperCAmelCase_ : bool = None
class A_ ( folder_based_builder.FolderBasedBuilder ):
    '''simple docstring'''
    # Folder-based builder specialized for audio files: the base feature is an
    # Audio column named "audio", configured via AudioFolderConfig, with an
    # audio-classification task template over "audio"/"label".
    UpperCAmelCase_ : List[str] = datasets.Audio()
    UpperCAmelCase_ : List[Any] = """audio"""
    UpperCAmelCase_ : List[Any] = AudioFolderConfig
    UpperCAmelCase_ : List[str] # definition at the bottom of the script
    UpperCAmelCase_ : str = AudioClassification(audio_column="""audio""" , label_column="""label""" )
# File extensions the audio folder builder recognizes as audio files.
lowercase__ = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
# NOTE(review): `AUDIO_EXTENSIONS` is undefined here — the list above was bound
# to `lowercase__`, so this line raises NameError at import time.
lowercase__ = AUDIO_EXTENSIONS
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order low-pass biquad filter (RBJ Audio EQ Cookbook).

    The obfuscated parameters/locals left `frequency`, `samplerate`, `aa` and
    `ba` undefined; names are restored (positional order unchanged).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # For the low-pass form b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook).

    Restores the coefficient names the obfuscation erased (`aa`/`ba` were
    undefined at the `set_coefficients` call).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # For the high-pass form b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order band-pass biquad filter with constant skirt gain
    (peak gain = Q), per the RBJ Audio EQ Cookbook.

    Restores the coefficient names the obfuscation erased.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook).

    The all-pass feed-forward coefficients are the feedback coefficients in
    reverse order, hence the mirrored lists below. Restores the coefficient
    names the obfuscation erased.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> "IIRFilter":
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook).

    ``gain_db`` is the boost/cut at the center frequency in decibels.
    Restores the coefficient names the obfuscation erased.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> "IIRFilter":
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook).

    ``gain_db`` is the shelf gain in decibels. Restores the coefficient names
    the obfuscation erased.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shorthand terms shared by the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> "IIRFilter":
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook).

    ``gain_db`` is the shelf gain in decibels. Restores the coefficient names
    the obfuscation erased.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shorthand terms shared by the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 695 | 1 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_(SchedulerCommonTest):
    """Unit tests for `EulerDiscreteScheduler`.

    The obfuscated original used the undefined base class ``_snake_case``
    (the import above is ``SchedulerCommonTest``), renamed every test method
    to ``UpperCAmelCase_`` (so later methods shadowed earlier ones and
    unittest could not discover them), and collapsed all locals onto
    ``UpperCAmelCase`` leaving names like ``config`` and ``sample`` unbound.
    Names are restored here; the test logic and expected values are unchanged.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        # Base config; individual tests override fields via kwargs.
        config = {
            'num_train_timesteps': 1_100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        # Full denoising loop with epsilon prediction; expected sums are
        # regression values for seed 0.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1E-2
        assert abs(result_mean.item() - 0.0131) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1E-2
        assert abs(result_mean.item() - 2.2676E-06) < 1E-3

    def test_full_loop_device(self):
        # Same as the no-noise loop but with timesteps placed on the device.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1E-2
        assert abs(result_mean.item() - 0.0131) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52_2994_9951_1719) < 1E-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963) < 1E-3
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache keeping at most ``_MAX_CAPACITY`` keys.

    The obfuscated original bound the type variable and the instance state to
    throwaway names (``lowercase__``, ``UpperCAmelCase``) and gave both
    methods the same name, so ``refer``/``display`` — used by the demo script
    below — did not exist and ``self.dq_store`` was never initialized.
    Restored here; ``A_`` is kept as a backward-compat alias.
    """

    dq_store: deque[T]  # Cache store of keys, most recent at the left
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # n == 0 (or falsy) means "effectively unbounded".
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access of key ``x``, evicting the LRU key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""


A_ = LRUCache  # backward-compat alias for the obfuscated class name
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The obfuscated original bound the cache to `lowercase__`, leaving
    # `lru_cache` — used by every statement below — undefined.
    lru_cache = LRUCache(4)
    lowercase__ = lru_cache  # preserve the old module-level binding
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")  # "A" is already cached: moved to the front
    lru_cache.refer(4)
    lru_cache.refer(5)  # capacity reached: evicts the LRU key (2)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 1 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset row's `content` field without truncation and
    record the characters-per-token ratio.

    The obfuscated original never bound `output` before returning it and was
    named `UpperCamelCase` while `ds.map` below calls `tokenize`; both names
    are kept bound.
    NOTE(review): the key "ratio_char_token" and `truncation=False` are
    restored from the upstream codeparrot pretokenization script — confirm.
    """
    output = {}
    output['input_ids'] = tokenizer(example['content'], truncation=False)['input_ids']
    output['ratio_char_token'] = len(example['content']) / len(output['input_ids'])
    return output


UpperCamelCase = tokenize  # backward-compat alias for the obfuscated name
# Parse CLI arguments, tokenize the dataset in parallel, and push the result
# to the Hub. The obfuscated original bound every intermediate to
# `lowercase__`, leaving `parser`, `args`, `tokenizer`, `t_start` and `ds`
# undefined at their use sites; the working names are restored.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
# Resource names expected by the fast tokenizer loader. The obfuscated
# original bound all three constants to `lowercase__`, while the tokenizer
# class below references them by their original names (NameError); the
# original names are restored, with `lowercase__` kept on the last value.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Map from checkpoint shortcut to the hosted tokenizer file.
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

# Maximum input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class A_(PreTrainedTokenizerFast):
    """Fast GPT-NeoX tokenizer (byte-level BPE backed by `tokenizers`).

    The obfuscated original extended the undefined name ``_snake_case`` (the
    import above is ``PreTrainedTokenizerFast``), collapsed the framework
    class attributes and both helper methods onto single names, and left
    locals such as ``pre_tok_state`` unbound; all are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the one requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a conversation into ids, appending EOS after each turn and
        keeping only the trailing `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 695 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Resource names expected by the tokenizer loader. The obfuscated original
# bound all three constants to `lowercase__`, while the tokenizer class below
# references them by their original names (NameError); the original names are
# restored, with `lowercase__` kept on the last value.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

# Map from checkpoint shortcut to the hosted vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# Maximum input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class A_(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition.

    The obfuscated original extended the undefined name ``_snake_case`` (the
    import above is ``PreTrainedTokenizer``), never assigned ``self.vocab`` /
    ``self.decoder`` in ``__init__``, left ``char_tokens`` unbound in
    ``_tokenize``, and gave every method the same name; all are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping for id -> token lookups.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # Split the input into individual characters.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary to ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        return (vocab_file,)
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_(PipelineTool):
    """Agent tool that transcribes audio to text with Whisper.

    The obfuscated original extended the undefined name ``_snake_case`` (the
    import above is ``PipelineTool``), collapsed the tool attributes onto one
    name, and named all three pipeline hooks identically so only the last
    survived; the ``PipelineTool`` hook names are restored.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Convert raw audio to Whisper input features.
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        # Return the first (only) transcription string.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 695 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = CanineTokenizer
UpperCAmelCase_ : List[str] = False
def UpperCAmelCase_ ( self : Tuple ) -> str:
super().setUp()
UpperCAmelCase : str = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
return CanineTokenizer.from_pretrained('google/canine-s' )
def UpperCAmelCase_ ( self : Any , **lowercase_ : Any ) -> CanineTokenizer:
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
UpperCAmelCase : Tuple = 1_024
return tokenizer
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
UpperCAmelCase : List[str] = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
UpperCAmelCase : Tuple = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase : List[Any] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='pt' )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Any = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> int:
UpperCAmelCase : List[Any] = self.canine_tokenizer
UpperCAmelCase : int = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
UpperCAmelCase : int = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , lowercase_ )
self.assertIn('attention_mask' , lowercase_ )
self.assertIn('token_type_ids' , lowercase_ )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase : List[str] = self.canine_tokenizer
UpperCAmelCase : Dict = [
'What\'s the weater?',
'It\'s about 25 degrees.',
]
UpperCAmelCase : Tuple = tokenizer(
text_target=lowercase_ , max_length=32 , padding='max_length' , truncation=lowercase_ , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def UpperCAmelCase_ ( self : str ) -> List[str]:
# safety check on max_len default value so we are sure the test works
UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase : int = tempfile.mkdtemp()
UpperCAmelCase : Any = ' He is very happy, UNwant\u00E9d,running'
UpperCAmelCase : Dict = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(lowercase_ )
UpperCAmelCase : Dict = after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
shutil.rmtree(lowercase_ )
UpperCAmelCase : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Tuple = ' He is very happy, UNwant\u00E9d,running'
UpperCAmelCase : Union[str, Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase : Optional[Any] = chr(0xE007 )
additional_special_tokens.append(lowercase_ )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
UpperCAmelCase : int = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(lowercase_ )
UpperCAmelCase : Tuple = after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertIn(lowercase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase : List[Any] = tokenizer.__class__.from_pretrained(lowercase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase , UpperCAmelCase : Tuple = self.get_clean_sequence(lowercase_ )
# a special token for Canine can be defined as follows:
UpperCAmelCase : List[str] = 0xE005
UpperCAmelCase : Optional[Any] = chr(lowercase_ )
tokenizer.add_special_tokens({'cls_token': special_token} )
UpperCAmelCase : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(len(lowercase_ ) , 1 )
UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase : int = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase : List[Any] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , input_encoded + special_token_id )
UpperCAmelCase : Any = tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase : int = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase : Union[str, Any] = chr(0xE005 )
UpperCAmelCase : List[Any] = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
UpperCAmelCase : List[Any] = tokenizer.tokenize(lowercase_ )
UpperCAmelCase : str = tokenizer.tokenize(lowercase_ )
self.assertEqual(len(lowercase_ ) , 1 )
self.assertEqual(len(lowercase_ ) , 1 )
self.assertEqual(token_a[0] , lowercase_ )
self.assertEqual(token_a[0] , lowercase_ )
@require_tokenizers
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase : Dict = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
UpperCAmelCase : int = 0xE006
UpperCAmelCase : Dict = chr(lowercase_ )
UpperCAmelCase : Union[str, Any] = AddedToken(lowercase_ , lstrip=lowercase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowercase_ )
tokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
UpperCAmelCase : List[str] = json.load(lowercase_ )
with open(os.path.join(lowercase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
UpperCAmelCase : Dict = json.load(lowercase_ )
# a special token for Canine can be defined as follows:
UpperCAmelCase : Optional[Any] = 0xE006
UpperCAmelCase : List[Any] = chr(lowercase_ )
UpperCAmelCase : Union[str, Any] = [new_token_a]
UpperCAmelCase : List[str] = [new_token_a]
with open(os.path.join(lowercase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase_ , lowercase_ )
with open(os.path.join(lowercase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase_ , lowercase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(lowercase_ , extra_ids=0 )
self.assertIn(lowercase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase : Tuple = 0xE007
UpperCAmelCase : str = chr(lowercase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase : Union[str, Any] = [AddedToken(lowercase_ , lstrip=lowercase_ )]
UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , extra_ids=0 )
self.assertIn(lowercase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase : int = 'hello world'
if self.space_between_special_tokens:
UpperCAmelCase : Dict = '[CLS] hello world [SEP]'
else:
UpperCAmelCase : Optional[Any] = input
UpperCAmelCase : Union[str, Any] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase : int = tokenizer.decode(lowercase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowercase_ , [output, output.lower()] )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
UpperCAmelCase : Dict = 'a'
UpperCAmelCase : Optional[int] = ord(lowercase_ )
for attr in attributes_list:
setattr(lowercase_ , attr + '_id' , lowercase_ )
self.assertEqual(getattr(lowercase_ , lowercase_ ) , lowercase_ )
self.assertEqual(getattr(lowercase_ , attr + '_id' ) , lowercase_ )
setattr(lowercase_ , attr + '_id' , lowercase_ )
self.assertEqual(getattr(lowercase_ , lowercase_ ) , lowercase_ )
self.assertEqual(getattr(lowercase_ , attr + '_id' ) , lowercase_ )
setattr(lowercase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowercase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowercase_ , 'additional_special_tokens_ids' ) , [] )
UpperCAmelCase : Optional[Any] = 0xE006
UpperCAmelCase : List[Any] = chr(lowercase_ )
setattr(lowercase_ , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(lowercase_ , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(lowercase_ , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
    def UpperCAmelCase_ ( self : List[Any] ) -> str:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : Dict ) -> Tuple:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : Dict ) -> int:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : Dict ) -> str:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # Intentionally a no-op: this mixin hook is skipped for this tokenizer.
        pass
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
    """Configuration for a Whisper encoder-decoder model.

    Holds architecture hyper-parameters (layer/head counts, FFN widths,
    dropout rates), SpecAugment masking options, and audio-classification
    extras, then forwards the special-token ids to the base config.
    """
    # Model-type tag, keys ignored at inference, and attribute aliases used
    # by the shared config machinery.
    UpperCAmelCase_ : Optional[Any] = """whisper"""
    UpperCAmelCase_ : Tuple = ["""past_key_values"""]
    UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
        # NOTE(review): the parameter names in this signature look
        # machine-rewritten — the body reads names such as ``vocab_size`` and
        # ``d_model`` that the signature never binds, and ``lowercase_`` is
        # repeated. Confirm against the upstream WhisperConfig before editing.
        UpperCAmelCase : Optional[Any] = vocab_size
        UpperCAmelCase : Any = num_mel_bins
        UpperCAmelCase : List[Any] = d_model
        UpperCAmelCase : int = encoder_layers
        UpperCAmelCase : str = encoder_attention_heads
        UpperCAmelCase : Tuple = decoder_layers
        UpperCAmelCase : Any = decoder_attention_heads
        UpperCAmelCase : Tuple = decoder_ffn_dim
        UpperCAmelCase : List[str] = encoder_ffn_dim
        UpperCAmelCase : int = dropout
        UpperCAmelCase : int = attention_dropout
        UpperCAmelCase : List[Any] = activation_dropout
        UpperCAmelCase : Tuple = activation_function
        UpperCAmelCase : Union[str, Any] = init_std
        UpperCAmelCase : Dict = encoder_layerdrop
        UpperCAmelCase : str = decoder_layerdrop
        UpperCAmelCase : Union[str, Any] = use_cache
        UpperCAmelCase : int = encoder_layers
        UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
        UpperCAmelCase : Tuple = max_source_positions
        UpperCAmelCase : List[Any] = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        UpperCAmelCase : Optional[int] = classifier_proj_size
        UpperCAmelCase : List[Any] = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        UpperCAmelCase : Optional[Any] = apply_spec_augment
        UpperCAmelCase : Optional[Any] = mask_time_prob
        UpperCAmelCase : Optional[Any] = mask_time_length
        UpperCAmelCase : str = mask_time_min_masks
        UpperCAmelCase : List[str] = mask_feature_prob
        UpperCAmelCase : Tuple = mask_feature_length
        UpperCAmelCase : Optional[int] = mask_feature_min_masks
        UpperCAmelCase : str = median_filter_width
        # Special-token ids and suppression lists are handled by the base class.
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
    """ONNX export configuration for Whisper (seq2seq, optional past key values)."""
    @property
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis layout of the exported inputs: encoder features are
        # always (batch, feature_size, encoder_sequence); the decoder axes
        # depend on whether past key values are exported.
        UpperCAmelCase : Optional[int] = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            UpperCAmelCase : int = {0: 'batch'}
        else:
            UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            # NOTE(review): ``lowercase_`` is unbound here — presumably the
            # inputs mapping built above; confirm against upstream.
            self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
        return common_inputs
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
        """Build dummy audio + decoder inputs for tracing the ONNX export."""
        UpperCAmelCase : Tuple = OrderedDict()
        # Audio features come from the feature extractor; decoder inputs from
        # the tokenizer via the parent seq2seq implementation.
        UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
        UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
        UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
        UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
            preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
        UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs
    @property
    def UpperCAmelCase_ ( self : Dict ) -> float:
        # Absolute tolerance used when validating the exported model outputs.
        return 1E-3
| 695 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
    """Tokenizer test suite for Longformer (slow and fast tokenizers).

    ``setUp`` writes a tiny BPE vocab/merges pair into a temp dir; the tests
    cover tokenization, special-token handling, offset mappings, and
    slow/fast parity.

    NOTE(review): many local/argument names in this class look
    machine-rewritten (``lowercase_`` is frequently unbound); confirm against
    the upstream Longformer tokenization tests before relying on the bodies.
    """
    UpperCAmelCase_ : Union[str, Any] = LongformerTokenizer
    UpperCAmelCase_ : Any = True
    UpperCAmelCase_ : List[Any] = LongformerTokenizerFast
    UpperCAmelCase_ : int = True
    # Write a minimal BPE vocabulary + merges file into the test tmp dir.
    def UpperCAmelCase_ ( self : Any ) -> Tuple:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase : List[Any] = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        UpperCAmelCase : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        UpperCAmelCase : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        UpperCAmelCase : Optional[Any] = {'unk_token': '<unk>'}
        UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowercase_ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(lowercase_ ) )
    # Slow-tokenizer factory bound to the temp vocab.
    def UpperCAmelCase_ ( self : Any , **lowercase_ : List[str] ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    # Fast-tokenizer factory bound to the temp vocab.
    def UpperCAmelCase_ ( self : Tuple , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    # Input/expected-output pair used by the shared tester mixin.
    def UpperCAmelCase_ ( self : str , lowercase_ : Optional[int] ) -> List[str]:
        UpperCAmelCase : Dict = 'lower newer'
        UpperCAmelCase : Any = 'lower newer'
        return input_text, output_text
    # Basic tokenize + convert_tokens_to_ids round trip on the toy vocab.
    def UpperCAmelCase_ ( self : Any ) -> int:
        UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCAmelCase : Tuple = 'lower newer'
        UpperCAmelCase : Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        UpperCAmelCase : Optional[Any] = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True)
        self.assertListEqual(lowercase_ , lowercase_ )
        UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
        UpperCAmelCase : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
    # Golden ids for two reference sentences.
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
        UpperCAmelCase : Dict = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowercase_ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowercase_ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
    # Special-token insertion must match manual build_inputs_with_special_tokens.
    @slow
    def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
        UpperCAmelCase : int = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
        UpperCAmelCase : Dict = tokenizer.encode('sequence builders' , add_special_tokens=lowercase_ )
        UpperCAmelCase : str = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase_ )
        UpperCAmelCase : str = tokenizer.encode(
            'sequence builders' , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
        UpperCAmelCase : Any = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
        UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    # Prefix-space handling and spacing around added special tokens.
    def UpperCAmelCase_ ( self : Any ) -> str:
        UpperCAmelCase : Dict = self.get_tokenizer()
        UpperCAmelCase : List[Any] = 'Encode this sequence.'
        UpperCAmelCase : List[Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        UpperCAmelCase : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
        UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowercase_ , lowercase_ )
        UpperCAmelCase : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
        UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowercase_ , lowercase_ )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        UpperCAmelCase : Optional[Any] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowercase_ , lowercase_ )
        # Testing spaces after special tokens
        UpperCAmelCase : Any = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space
        UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ )
        UpperCAmelCase : int = 'Encode <mask> sequence'
        UpperCAmelCase : Optional[Any] = 'Encode <mask>sequence'
        UpperCAmelCase : Dict = tokenizer.encode(lowercase_ )
        UpperCAmelCase : str = encoded.index(lowercase_ )
        UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowercase_ , lowercase_ )
        UpperCAmelCase : List[str] = tokenizer.encode(lowercase_ )
        UpperCAmelCase : List[str] = encoded.index(lowercase_ )
        UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowercase_ , lowercase_ )
    # Intentionally a no-op for this tokenizer.
    def UpperCAmelCase_ ( self : Any ) -> int:
        pass
    # Slow vs fast tokenizer parity on a sentence containing <mask>.
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                UpperCAmelCase : Optional[int] = 'A, <mask> AllenNLP sentence.'
                UpperCAmelCase : List[Any] = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                UpperCAmelCase : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                UpperCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                UpperCAmelCase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    # add_prefix_space / trim_offsets must survive (de)serialization of the
    # fast tokenizer's pre-tokenizer and post-processor state.
    def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
            UpperCAmelCase : Any = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            UpperCAmelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowercase_ )
            self.assertEqual(post_processor_state['add_prefix_space'] , lowercase_ )
            self.assertEqual(post_processor_state['trim_offsets'] , lowercase_ )
    def UpperCAmelCase_ ( self : str ) -> str:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                UpperCAmelCase : int = f"""{text_of_1_token} {text_of_1_token}"""
                UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : Union[str, Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
                UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : Any = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
                UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : Union[str, Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
                UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : List[str] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
                UpperCAmelCase : Dict = f""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : Dict = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
                UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : Tuple = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
                UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
                    lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
                UpperCAmelCase : List[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
    """Write a minimal default Accelerate cluster config file and return its path.

    Detects the available accelerator (CUDA, then XPU, then NPU, else CPU),
    records the matching distributed type and process count, validates the
    mixed-precision choice, and refuses to overwrite an existing file
    (returning ``False`` in that case).

    NOTE(review): parameter and local names look machine-rewritten — the body
    reads names such as ``mixed_precision`` and ``num_gpus`` that are never
    bound. Confirm against the upstream accelerate source before editing.
    """
    UpperCAmelCase : Any = Path(UpperCAmelCase_ )
    path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
    if path.exists():
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    UpperCAmelCase : Optional[int] = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    UpperCAmelCase : Dict = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Accelerator detection, in priority order: CUDA, XPU, NPU, CPU fallback.
    if torch.cuda.is_available():
        UpperCAmelCase : Dict = torch.cuda.device_count()
        UpperCAmelCase : List[Any] = num_gpus
        UpperCAmelCase : List[Any] = False
        if num_gpus > 1:
            UpperCAmelCase : Tuple = 'MULTI_GPU'
        else:
            UpperCAmelCase : Optional[Any] = 'NO'
    elif is_xpu_available() and use_xpu:
        UpperCAmelCase : Optional[int] = torch.xpu.device_count()
        UpperCAmelCase : Optional[int] = num_xpus
        UpperCAmelCase : Any = False
        if num_xpus > 1:
            UpperCAmelCase : Tuple = 'MULTI_XPU'
        else:
            UpperCAmelCase : str = 'NO'
    elif is_npu_available():
        UpperCAmelCase : Optional[int] = torch.npu.device_count()
        UpperCAmelCase : str = num_npus
        UpperCAmelCase : int = False
        if num_npus > 1:
            UpperCAmelCase : int = 'MULTI_NPU'
        else:
            UpperCAmelCase : List[str] = 'NO'
    else:
        # CPU-only machine: single process, no distributed backend.
        UpperCAmelCase : str = 0
        UpperCAmelCase : int = True
        UpperCAmelCase : str = 1
        UpperCAmelCase : str = 'NO'
    UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
    config.to_json_file(UpperCAmelCase_ )
    return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Register the ``accelerate config default`` subcommand and its flags.

    NOTE(review): the duplicated parameter names and the body's reference to
    ``parser`` look machine-rewritten; confirm against upstream.
    """
    UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
    parser.add_argument(
        '--config_file' , default=UpperCAmelCase_ , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=UpperCAmelCase_ )
    return parser
def UpperCamelCase( UpperCAmelCase_ ):
    """Entry point for ``accelerate config default``: write the file and report.

    NOTE(review): the body reads ``args``/``config_file`` which the signature
    does not bind — names appear machine-rewritten; confirm against upstream.
    """
    UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 1 |
'''simple docstring'''
from typing import Any
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
    """Run the Viterbi algorithm: return the most likely hidden-state path
    for the given observations under an HMM (initial, transition, and
    emission probabilities).

    NOTE(review): parameter names are duplicated and the body reads names
    (``observations_space`` etc.) the signature never binds — the identifiers
    appear machine-rewritten; confirm against the original implementation.
    """
    _validation(
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
    # Creates data structures and fill initial step
    UpperCAmelCase : dict = {}
    UpperCAmelCase : dict = {}
    for state in states_space:
        UpperCAmelCase : int = observations_space[0]
        UpperCAmelCase : str = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        UpperCAmelCase : Tuple = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(UpperCAmelCase_ ) ):
        UpperCAmelCase : List[Any] = observations_space[o]
        UpperCAmelCase : str = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            UpperCAmelCase : List[Any] = ''
            UpperCAmelCase : str = -1
            for k_state in states_space:
                UpperCAmelCase : Dict = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    UpperCAmelCase : Dict = probability
                    UpperCAmelCase : List[str] = k_state
            # Update probabilities and pointers dicts
            UpperCAmelCase : List[Any] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            UpperCAmelCase : Dict = arg_max
    # The final observation
    UpperCAmelCase : str = observations_space[len(UpperCAmelCase_ ) - 1]
    # argmax for given final observation
    UpperCAmelCase : List[Any] = ''
    UpperCAmelCase : str = -1
    for k_state in states_space:
        UpperCAmelCase : List[Any] = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            UpperCAmelCase : Optional[int] = probability
            UpperCAmelCase : Union[str, Any] = k_state
    UpperCAmelCase : List[Any] = arg_max
    # Process pointers backwards
    UpperCAmelCase : List[str] = last_state
    UpperCAmelCase : int = []
    for o in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
        result.append(UpperCAmelCase_ )
        UpperCAmelCase : str = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
    """Validate all Viterbi inputs: non-empty, correct list/dict shapes."""
    _validate_not_empty(
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
    _validate_lists(UpperCAmelCase_ , UpperCAmelCase_ )
    _validate_dicts(
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
    """Raise ``ValueError`` if any Viterbi input container is empty/falsy."""
    # NOTE(review): the names tested below are not bound by this signature —
    # identifiers appear machine-rewritten; confirm against the original.
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Check that the observation and state spaces are lists of strings."""
    _validate_list(UpperCAmelCase_ , 'observations_space' )
    _validate_list(UpperCAmelCase_ , 'states_space' )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Raise ``ValueError`` unless *_object* is a list of strings."""
    if not isinstance(_object , UpperCAmelCase_ ):
        UpperCAmelCase : List[str] = f"""{var_name} must be a list"""
        raise ValueError(UpperCAmelCase_ )
    else:
        for x in _object:
            if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                UpperCAmelCase : Union[str, Any] = f"""{var_name} must be a list of strings"""
                raise ValueError(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
    """Validate the three probability tables (flat and nested dicts)."""
    _validate_dict(UpperCAmelCase_ , 'initial_probabilities' , UpperCAmelCase_ )
    _validate_nested_dict(UpperCAmelCase_ , 'transition_probabilities' )
    _validate_nested_dict(UpperCAmelCase_ , 'emission_probabilities' )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Validate a dict-of-dicts: the outer dict and each inner value dict."""
    _validate_dict(_object , UpperCAmelCase_ , UpperCAmelCase_ )
    for x in _object.values():
        _validate_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False ):
    """Raise ``ValueError`` unless *_object* is a dict with string keys and
    values of the expected type; the error message notes nesting."""
    if not isinstance(_object , UpperCAmelCase_ ):
        UpperCAmelCase : Optional[Any] = f"""{var_name} must be a dict"""
        raise ValueError(UpperCAmelCase_ )
    if not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for x in _object ):
        UpperCAmelCase : List[Any] = f"""{var_name} all keys must be strings"""
        raise ValueError(UpperCAmelCase_ )
    if not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for x in _object.values() ):
        UpperCAmelCase : List[str] = 'nested dictionary ' if nested else ''
        UpperCAmelCase : Tuple = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(UpperCAmelCase_ )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    """Deprecated alias kept for backwards compatibility.

    Emits a deprecation warning on construction and otherwise defers
    entirely to the image-processor base class.
    """
    def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
        # Warn once per construction, then initialize the replacement class.
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
        super().__init__(*lowercase_ , **lowercase_ )
| 695 | 1 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def UpperCamelCase( UpperCAmelCase_ ):
    """Return the Liouville lambda of a positive integer.

    The result is -1 when the count of prime factors (with multiplicity)
    is odd, and 1 when it is even.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is smaller than 1.
    """
    # Bug fix: the original tested isinstance(number, number) — comparing the
    # value against itself instead of against ``int`` — which raised TypeError
    # for every input, and then raised TypeError with the raw argument rather
    # than the prepared message.
    if not isinstance(UpperCAmelCase_ , int ):
        raise TypeError(f"""Input value of [number={UpperCAmelCase_}] must be an integer""" )
    if UpperCAmelCase_ < 1:
        raise ValueError('Input must be a positive integer' )
    # Parity of the number of prime factors, counted with multiplicity.
    return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1
# Execute the module's doctests when run directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
    """A single raw HANS example: a sentence pair plus optional metadata/label.

    NOTE(review): the field names below are duplicated — they appear
    machine-rewritten; confirm against the upstream ``InputExample``.
    """
    UpperCAmelCase_ : str
    UpperCAmelCase_ : str
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
    """Tokenized model features for one HANS example (ids, masks, label).

    NOTE(review): the field names below are duplicated — they appear
    machine-rewritten; confirm against the upstream ``InputFeatures``.
    """
    UpperCAmelCase_ : List[int]
    UpperCAmelCase_ : Optional[List[int]] = None
    UpperCAmelCase_ : Optional[List[int]] = None
    UpperCAmelCase_ : Optional[Union[int, float]] = None
    UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
    """Torch ``Dataset`` over HANS examples with on-disk feature caching.

    Features are computed once per (split, tokenizer, max-length, task)
    combination and cached under ``data_dir``; a file lock keeps distributed
    workers from racing on the cache.
    """
    UpperCAmelCase_ : List[InputFeatures]
    def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
        # NOTE(review): argument names look machine-rewritten — the body reads
        # ``task``/``data_dir``/``evaluate`` that the signature never binds.
        UpperCAmelCase : Dict = hans_processors[task]()
        UpperCAmelCase : List[Any] = os.path.join(
            lowercase_ , 'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Optional[int] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCAmelCase : int = cached_features_file + '.lock'
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                UpperCAmelCase : Tuple = torch.load(lowercase_ )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                UpperCAmelCase : int = (
                    processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
                )
                logger.info('Training examples: %s' , len(lowercase_ ) )
                UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
                logger.info('Saving features into cached file %s' , lowercase_ )
                torch.save(self.features , lowercase_ )
    def __len__( self : Union[str, Any] ) -> str:
        # Number of featurized examples.
        return len(self.features )
    def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
        # Direct index into the pre-computed feature list.
        return self.features[i]
    def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        # Label names, possibly reordered for RoBERTa-style checkpoints.
        return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
    """TensorFlow dataset for the HANS benchmark.

    Features are built eagerly in ``__init__`` and exposed as a
    ``tf.data.Dataset`` via :meth:`get_dataset`.
    """

    # Tokenized examples; populated in __init__.
    UpperCAmelCase_ : List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache: bool = False,
        evaluate: bool = False,
    ) -> None:
        # Fixed: the scrambled source gave every parameter the same name (a
        # SyntaxError) and never assigned ``self.features`` / ``self.label_list``.
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            # Generator feeding tf.data; yields (features dict, label) pairs.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                if ex_index % 10_000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(self.features)))

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # int32 features / int64 labels as in the upstream HANS utilities —
        # the scrambled source only recorded an ambiguous ``tf.intaa``.
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    'example_id': tf.int32,
                    'input_ids': tf.int32,
                    'attention_mask': tf.int32,
                    'token_type_ids': tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    'example_id': tf.TensorShape([]),
                    'input_ids': tf.TensorShape([None, None]),
                    'attention_mask': tf.TensorShape([None, None]),
                    'token_type_ids': tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        """Return the wrapped ``tf.data.Dataset``.

        Renamed: the two scrambled method names collided, so this accessor was
        unreachable (shadowed by the label accessor below).
        """
        return self.dataset

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        # Fixed: originally indexed with an undefined name instead of the parameter.
        return self.features[i]

    def get_labels(self):
        """Return the (possibly swapped) label list."""
        return self.label_list
class A_ ( _snake_case ):
    """Processor for the HANS data set (GLUE-style DataProcessor).

    Method names restored: the scrambled source gave all four methods the same
    name (mutual shadowing), while the dataset classes above call
    ``get_labels`` / ``get_dev_examples`` / ``get_train_examples``.
    """

    def get_train_examples(self, data_dir):
        """Build train-split examples from ``heuristics_train_set.txt``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')

    def get_dev_examples(self, data_dir):
        """Build dev-split examples from ``heuristics_evaluation_set.txt``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')

    def get_labels(self):
        """Return the three NLI labels used by HANS."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into ``InputExample`` objects (row 0 is the header)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue  # skip the header row
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # pairID column may carry an "ex" prefix; strip it when present.
            pair_id = line[7][2:] if line[7].startswith('ex') else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pair_id))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Tokenize ``examples`` into ``InputFeatures``.

    Renamed/fixed: the scrambled def repeated one parameter name four times
    (a SyntaxError) while both dataset classes call this real name.

    Args:
        examples: list of ``InputExample`` objects.
        label_list: ordered label strings; index = class id.
        max_length: pad/truncate length for the tokenizer.
        tokenizer: a ``PreTrainedTokenizer``.

    Returns:
        List of ``InputFeatures`` with ``label`` and integer ``pairID`` set.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10_000 == 0:
            logger.info('Writing example %d' % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_overflowing_tokens=True,
        )
        # Unknown labels fall back to class 0 rather than raising.
        label = label_map[example.label] if example.label in label_map else 0
        pair_id = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pair_id))

    # Log the first few converted examples for debugging.
    for i, example in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")

    return features
# Number of output labels per HANS task.
# NOTE(review): this binding is immediately shadowed below — both maps share
# the scrambled name ``lowercase__``.
lowercase__ = {
    "hans": 3,
}

# Task name -> processor class. ``A_`` (defined above) is the HANS processor;
# the scrambled source referenced the undefined name ``HansProcessor`` here.
lowercase__ = {
    "hans": A_,
}

# Alias matching the lookups performed by the dataset classes above
# (``hans_processors[task]``), which otherwise raise NameError.
hans_processors = lowercase__
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]: seive[k] ends up True for primes.
# Fixed: the scrambled source bound the list and the counter to throwaway
# names while the loop read ``seive``/``i`` and never struck out composites.
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        # Strike out every multiple of the prime i, starting at i*i.
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """O(1) primality lookup in the precomputed sieve (valid for 2 <= n <= 1_000_000).

    Renamed/fixed: the scrambled def read an undefined ``n`` and the callers
    below use the name ``is_prime``.
    """
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    """Return True if the decimal representation of ``n`` contains an even digit.

    Renamed to match the call sites below; the body was already correct.
    """
    return any(digit in '02468' for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes up to ``limit``.

    A circular prime stays prime under every rotation of its digits; any
    candidate containing an even digit has a rotation divisible by 2, so
    those are skipped early.

    Renamed/fixed: the scrambled def read an undefined ``limit``, tested the
    parameter instead of each rotation, and appended the parameter instead of
    the candidate number.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            # All digit rotations of num.
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def UpperCamelCase( ):
    # Convenience wrapper: count of circular primes below one million.
    # NOTE(review): the __main__ guard below calls ``find_circular_primes``
    # directly, so this wrapper appears unused.
    return len(find_circular_primes() )
if __name__ == "__main__":
    # Prints the full list length using the f-string debug (=) syntax.
    print(f'''{len(find_circular_primes()) = }''')
| 695 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Project Euler 57: among the first ``n`` expansions of the continued
    fraction for sqrt(2), count those whose numerator has more decimal digits
    than the denominator.

    Each expansion follows num_i = num_{i-1} + 2*den_{i-1} and
    den_i = num_{i-1} + den_{i-1}, starting from 1/1.

    Renamed/fixed: the scrambled def read an undefined ``n``, compared a
    value's digit count against itself, and appended the wrong variable; the
    __main__ guard below already calls ``solution``.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Checkpoint -> config URL map for Open-Llama.
# NOTE(review): this rebinds ``lowercase__`` — the logger handle above is lost;
# the two were presumably meant to have distinct names.
lowercase__ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration class for Open-Llama models (hyper-parameters only)."""

    # model_type identifier used by the auto-config machinery.
    UpperCAmelCase_ : Optional[int] = 'open-llama'

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act='silu',
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        """Store the model hyper-parameters and forward token ids to the base.

        Fixed: the scrambled def repeated one parameter name twenty times (a
        SyntaxError) while the body read the real hyper-parameter names.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historical misspelled kwarg as an override of the default.
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate ``rope_scaling``: None, or a dict with keys ``type``
        (``linear``/``dynamic``) and ``factor`` (float > 1)."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 695 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    Renamed/fixed: the scrambled def read an undefined ``power`` and the
    __main__ guard below already calls ``solution``.
    """
    n = 2**power
    r = 0
    while n:
        # Peel off the least-significant digit and drop it from n.
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
# Checkpoint -> config URL map for BLIP-2.
# NOTE(review): this rebinds ``lowercase__`` — the logger handle above is lost;
# the two were presumably meant to have distinct names.
lowercase__ = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
    """Configuration for the BLIP-2 vision encoder."""

    UpperCAmelCase_ : str = 'blip_2_vision_model'

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act='gelu',
        layer_norm_eps=0.0_0001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        """Store the vision-tower hyper-parameters.

        Fixed: the scrambled def repeated one parameter name (a SyntaxError)
        while the body read the real hyper-parameter names.
        """
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping a full Blip2Config if given one.

        Renamed from the scrambled method name so the standard loading entry
        point actually dispatches here.
        """
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict, **kwargs)
class A_ ( _snake_case ):
    """Configuration for the BLIP-2 Q-Former (BERT-like with cross-attention)."""

    UpperCAmelCase_ : Optional[int] = 'blip_2_qformer'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        """Store the Q-Former hyper-parameters.

        Fixed: the scrambled def repeated one parameter name (a SyntaxError)
        while the body read the real hyper-parameter names.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Insert a cross-attention layer every `cross_attention_frequency` layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping a full Blip2Config if given one.

        Renamed from the scrambled method name so the standard loading entry
        point actually dispatches here.
        """
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict, **kwargs)
class A_ ( _snake_case ):
    """Composite BLIP-2 configuration: vision tower + Q-Former + language model."""

    UpperCAmelCase_ : Optional[int] = 'blip-2'
    # Marks this as a composition of sub-configs for the serialization machinery.
    UpperCAmelCase_ : Any = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        """Build the three sub-configs, defaulting any that are omitted.

        Fixed: the scrambled def repeated one parameter name (a SyntaxError)
        while the body read the real parameter names.
        """
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        # NOTE(review): ``BlipaVisionConfig``/``BlipaQFormerConfig`` are the
        # names the original referenced; confirm they resolve in the full file.
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision tower's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: BlipaVisionConfig,
        qformer_config: BlipaQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Build a composite config directly from the three sub-configs.

        Renamed from the scrambled method name to the conventional factory name.
        """
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, recursing into the three sub-configs.

        Renamed from the scrambled method name; the base serialization
        machinery dispatches on ``to_dict``.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 695 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
    """Tokenizer tests for GPT-SW3 against a small SentencePiece fixture.

    Fixed: several methods referenced the undefined name ``lowercase_`` where
    the module-level fixture path is ``lowercase__``, and the unittest/mixin
    hook names (``setUp``, ``tokenizer_class``, ``test_*``) were scrambled so
    nothing would run.
    """

    # Hook attributes read by TokenizerTesterMixin.
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(lowercase__, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)

    def test_complete_tokenizer(self):
        tokenizer = GPTSwaTokenizer(lowercase__)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(lowercase__)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        expected_encoding = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences, )
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
    """Translation-quality regression test for the FSMT WMT19 checkpoints.

    Helper method names restored: the scrambled source gave all methods the
    same name while the test body calls ``self.get_tokenizer`` /
    ``self.get_model``.
    """

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            # Halve memory on GPU; scores are tolerant to fp16.
            model.half()
        return model

    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class A_ :
    """Builds tiny Pegasus configs and inputs for the TF model tests below."""

    # Restored attribute names: the test methods read ``self.config_cls`` and
    # ``self.config_updates``, which the scrambled attrs never provided.
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        # Fixed: the scrambled def repeated one parameter name (a SyntaxError).
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a tiny (config, inputs_dict) pair; every sequence ends in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached (past_key_values) decoding matches full decoding."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Pegasus forward pass.

    Renamed/fixed: the scrambled def repeated one parameter name (a
    SyntaxError) while the tester above calls this real name.
    """
    if attention_mask is None:
        # Attend everywhere except padding.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended (decoder_start token).
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
    """Model-level tests for TF Pegasus.

    Restored mixin hook attribute names and unittest lifecycle/test method
    names: under the scrambled names, ``setUp`` never ran and no method was
    discovered as a test.
    """

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFPegasusForConditionalGeneration,
            'feature-extraction': TFPegasusModel,
            'summarization': TFPegasusForConditionalGeneration,
            'text2text-generation': TFPegasusForConditionalGeneration,
            'translation': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        # Fixed: config_class previously referenced an undefined name.
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test: batch summarization with google/pegasus-xsum.

    NOTE(review): names are machine-mangled; by usage the three class
    attributes below are src_text, expected_text and model_name (see the
    generation helpers) -- confirm against the upstream transformers test.
    """

    # Source documents fed to the summarizer.
    UpperCAmelCase_ : List[str] = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]
    # Expected XSum-style summaries for the sources above.
    UpperCAmelCase_ : Optional[Any] = [
        """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
        """ reduce the risk of wildfires.""",
        """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
    ] # differs slightly from pytorch, likely due to numerical differences in linear layers
    # Hub checkpoint under test.
    UpperCAmelCase_ : Tuple = """google/pegasus-xsum"""

    @cached_property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
        """Lazily load the tokenizer for the checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def UpperCAmelCase_ ( self : Dict ) -> str:
        """Lazily load the TF seq2seq model for the checkpoint."""
        UpperCAmelCase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def UpperCAmelCase_ ( self : Any , **lowercase_ : str ) -> Optional[Any]:
        """Generate summaries and assert they equal the expected text."""
        UpperCAmelCase : Optional[Any] = self.translate_src_text(**lowercase_ )
        assert self.expected_text == generated_words

    def UpperCAmelCase_ ( self : Optional[Any] , **lowercase_ : Optional[int] ) -> str:
        """Tokenize the sources, run beam-search generation and decode."""
        UpperCAmelCase : Any = self.tokenizer(self.src_text , **lowercase_ , padding=lowercase_ , return_tensors='tf' )
        UpperCAmelCase : Dict = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase_ , )
        UpperCAmelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase_ )
        return generated_words

    @slow
    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        """Slow end-to-end batch generation check."""
        self._assert_generated_batch_equal_expected()
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
lowercase__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config-file URL.
lowercase__ = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class A_ ( _snake_case ):
    """Configuration class for the Pix2Struct text (decoder) model.

    NOTE(review): identifiers are machine-mangled.  ``__init__`` repeatedly
    rebinds one local name ``UpperCAmelCase`` (the original presumably set
    ``self.<attr>`` fields) and several names it reads (vocab_size,
    dense_act_fn, ...) are never bound locally -- confirm against the
    upstream Pix2StructTextConfig before relying on behaviour.
    """

    # Model-type tag used by Auto* dispatch.
    UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
    # Keys excluded when comparing/serialising configs.
    UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
    # Standard attribute -> config field name mapping.
    UpperCAmelCase_ : Optional[int] = {
        """hidden_size""": """hidden_size""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
        # Store decoder hyper-parameters (vocab size, dims, relative-attention
        # buckets, dropout, token ids); defaults mirror the published config.
        UpperCAmelCase : Optional[Any] = vocab_size
        UpperCAmelCase : int = hidden_size
        UpperCAmelCase : List[Any] = d_kv
        UpperCAmelCase : Any = d_ff
        UpperCAmelCase : List[str] = num_layers
        UpperCAmelCase : str = num_heads
        UpperCAmelCase : List[Any] = relative_attention_num_buckets
        UpperCAmelCase : Tuple = relative_attention_max_distance
        UpperCAmelCase : str = dropout_rate
        UpperCAmelCase : Optional[int] = layer_norm_epsilon
        UpperCAmelCase : int = initializer_factor
        UpperCAmelCase : Union[str, Any] = use_cache
        UpperCAmelCase : List[Any] = eos_token_id
        UpperCAmelCase : Union[str, Any] = decoder_start_token_id
        # for backwards compatibility
        UpperCAmelCase : List[str] = dense_act_fn
        super().__init__(
            pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )

    @classmethod
    def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
        """Load from a pretrained checkpoint; when the checkpoint holds a
        composite pix2struct config, extract its text sub-config first."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            UpperCAmelCase : Any = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Configuration class for the Pix2Struct vision (encoder) model.

    NOTE(review): identifiers are machine-mangled; ``__init__`` repeatedly
    rebinds the local ``UpperCAmelCase`` where the original presumably set
    ``self.<attr>`` fields -- confirm against the upstream config.
    """

    # Model-type tag used by Auto* dispatch.
    UpperCAmelCase_ : int = """pix2struct_vision_model"""

    def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
        super().__init__(**lowercase_ )
        # Store vision-encoder hyper-parameters; defaults mirror the published
        # checkpoint.
        UpperCAmelCase : Any = hidden_size
        UpperCAmelCase : Any = patch_embed_hidden_size
        UpperCAmelCase : Optional[int] = d_ff
        UpperCAmelCase : Dict = dropout_rate
        UpperCAmelCase : Dict = num_hidden_layers
        UpperCAmelCase : List[Any] = num_attention_heads
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : str = initializer_factor
        UpperCAmelCase : str = attention_dropout
        UpperCAmelCase : str = layer_norm_eps
        UpperCAmelCase : Union[str, Any] = dense_act_fn
        UpperCAmelCase : Dict = seq_len
        UpperCAmelCase : Optional[int] = relative_attention_num_buckets
        UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
        UpperCAmelCase : str = d_kv

    @classmethod
    def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
        """Load from a pretrained checkpoint; when the checkpoint holds a
        composite pix2struct config, extract its vision sub-config first."""
        cls._set_token_in_kwargs(lowercase_ )
        UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            UpperCAmelCase : List[str] = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
    """Composite Pix2Struct configuration holding text + vision sub-configs.

    NOTE(review): identifiers are machine-mangled; ``__init__`` rebinds one
    local where the original set ``self.<attr>`` fields, and it reads names
    (text_config, initializer_range, is_vqa, ...) never bound locally --
    confirm against the upstream Pix2StructConfig.
    """

    # Model-type tag used by Auto* dispatch.
    UpperCAmelCase_ : Optional[int] = """pix2struct"""
    # Flag consumed by the serialisation machinery (presumably
    # is_composition) -- TODO confirm.
    UpperCAmelCase_ : Dict = True

    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
        super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            UpperCAmelCase : Optional[int] = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            UpperCAmelCase : List[str] = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
        UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
        # Mirror the text config's special-token ids at the top level.
        UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
        UpperCAmelCase : str = self.text_config.pad_token_id
        UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
        UpperCAmelCase : Union[str, Any] = initializer_factor
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : int = self.initializer_range
        UpperCAmelCase : int = self.initializer_range
        UpperCAmelCase : str = is_vqa

    @classmethod
    def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
        """Build a composite config from existing text and vision configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )

    def UpperCAmelCase_ ( self : Any ) -> Tuple:
        """Serialise to a plain dict, expanding sub-configs and model_type."""
        UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
        UpperCAmelCase : Optional[int] = self.text_config.to_dict()
        UpperCAmelCase : Dict = self.vision_config.to_dict()
        UpperCAmelCase : Optional[Any] = self.__class__.model_type
        return output
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
# Module metadata: version stamp and author list.
lowercase__ = "2020.9.26"
lowercase__ = "xcodz-dot, cclaus, dhruvmanila"
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    # Perspective-project a 3-D point (x, y, z) onto the 2-D plane using
    # ``distance`` and ``scale``; returns (projected_x, projected_y).
    # NOTE(review): parameter names are machine-mangled (and duplicated, which
    # is not valid Python); original was convert_to_2d(x, y, z, scale,
    # distance) -- confirm.  The locals()-based validation depends on the
    # exact local-name set, so the code is left byte-identical.
    if not all(isinstance(UpperCAmelCase_ , (float, int) ) for val in locals().values() ):
        UpperCAmelCase : List[Any] = F"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(UpperCAmelCase_ )
    UpperCAmelCase : List[Any] = ((x * distance) / (z + distance)) * scale
    UpperCAmelCase : Optional[int] = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    # Rotate point (x, y, z) about the given axis ('x', 'y' or 'z') by
    # ``angle``; returns the rotated (new_x, new_y, new_z).
    # NOTE(review): parameter names are machine-mangled (and duplicated, which
    # is not valid Python); original was rotate(x, y, z, axis, angle) --
    # confirm.  The locals()-based validation depends on the exact local-name
    # set, so the code is left byte-identical.
    if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
        raise TypeError('Axis must be a str' )
    UpperCAmelCase : Dict = locals()
    del input_variables["axis"]
    if not all(isinstance(UpperCAmelCase_ , (float, int) ) for val in input_variables.values() ):
        UpperCAmelCase : int = (
            'Input values except axis must either be float or int: '
            F"""{list(input_variables.values() )}"""
        )
        raise TypeError(UpperCAmelCase_ )
    # Quirk preserved from the original algorithm: angle is taken modulo 360
    # and then scaled by /450 * 180/pi rather than the usual degrees->radians
    # conversion.
    UpperCAmelCase : Dict = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        # Rotation in the x-y plane; z unchanged.
        UpperCAmelCase : Any = x * math.cos(UpperCAmelCase_ ) - y * math.sin(UpperCAmelCase_ )
        UpperCAmelCase : List[Any] = y * math.cos(UpperCAmelCase_ ) + x * math.sin(UpperCAmelCase_ )
        UpperCAmelCase : List[str] = z
    elif axis == "x":
        # Rotation in the y-z plane; x unchanged.
        UpperCAmelCase : List[Any] = y * math.cos(UpperCAmelCase_ ) - z * math.sin(UpperCAmelCase_ )
        UpperCAmelCase : str = z * math.cos(UpperCAmelCase_ ) + y * math.sin(UpperCAmelCase_ )
        UpperCAmelCase : Tuple = x
    elif axis == "y":
        # Rotation in the x-z plane; y unchanged.
        UpperCAmelCase : Union[str, Any] = x * math.cos(UpperCAmelCase_ ) - z * math.sin(UpperCAmelCase_ )
        UpperCAmelCase : Dict = z * math.cos(UpperCAmelCase_ ) + x * math.sin(UpperCAmelCase_ )
        UpperCAmelCase : Union[str, Any] = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
    return new_x, new_y, new_z
# Run doctests and print demo output when executed directly.
# NOTE(review): ``convert_to_ad`` is not defined under that name in this
# mangled module (the projection function above was renamed) -- confirm.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
    print(f'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
| 695 |
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
    """Base85-encode a string: UTF-8 encode it and return the encoded bytes.

    Bug fix: the original body referenced an undefined name ``string``
    instead of the function's parameter, raising NameError on every call.
    """
    return baseaa.baaencode(UpperCAmelCase_.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
    """Decode base85-encoded bytes back into a UTF-8 string."""
    decoded_bytes = baseaa.baadecode(UpperCAmelCase_ )
    return decoded_bytes.decode('utf-8' )
# Round-trip demo: encode then decode "Hello World!" and print both forms.
if __name__ == "__main__":
    lowercase__ = "Hello World!"
    lowercase__ = baseaa_encode(test)
    print(encoded)
    lowercase__ = baseaa_decode(encoded)
    print(decoded)
| 695 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
    """A single (premise, hypothesis) example for the HANS dataset.

    NOTE(review): field names are machine-mangled (and duplicated, which is
    not valid Python); by position they appear to be guid, text_a, text_b,
    label and pairID -- confirm against the upstream transformers example.
    """

    UpperCAmelCase_ : str
    UpperCAmelCase_ : str
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
    """Tokenized features for one HANS example.

    NOTE(review): field names are machine-mangled; by construction in the
    conversion function below they appear to be input_ids, attention_mask,
    token_type_ids, label and pairID -- confirm.
    """

    UpperCAmelCase_ : List[int]
    UpperCAmelCase_ : Optional[List[int]] = None
    UpperCAmelCase_ : Optional[List[int]] = None
    UpperCAmelCase_ : Optional[Union[int, float]] = None
    UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
    """PyTorch Dataset for HANS with on-disk feature caching.

    NOTE(review): names are machine-mangled; ``__init__`` rebinds one local
    where the original presumably stored self.features / self.label_list,
    and it reads names (task, evaluate, tokenizer, data_dir) never bound
    locally -- confirm against the upstream transformers HANS example.
    """

    # Cached tokenized features for the chosen split.
    UpperCAmelCase_ : List[InputFeatures]

    def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
        UpperCAmelCase : Dict = hans_processors[task]()
        # Cache filename encodes split, tokenizer class, max length and task.
        UpperCAmelCase : List[Any] = os.path.join(
            lowercase_ , 'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Optional[int] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCAmelCase : int = cached_features_file + '.lock'
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                UpperCAmelCase : Tuple = torch.load(lowercase_ )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                UpperCAmelCase : int = (
                    processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
                )
                logger.info('Training examples: %s' , len(lowercase_ ) )
                UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
                logger.info('Saving features into cached file %s' , lowercase_ )
                torch.save(self.features , lowercase_ )

    def __len__( self : Union[str, Any] ) -> str:
        return len(self.features )

    def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
        return self.features[i]

    def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        """Return the (possibly RoBERTa-reordered) label list."""
        return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
    """TensorFlow dataset wrapper for HANS built via a generator.

    NOTE(review): names are machine-mangled; the original presumably stored
    self.features / self.dataset / self.label_list, and ``__init__`` reads
    names (task, evaluate) never bound locally -- confirm against the
    upstream transformers HANS example.
    """

    # Tokenized features for the chosen split.
    UpperCAmelCase_ : List[InputFeatures]

    def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
        UpperCAmelCase : int = hans_processors[task]()
        UpperCAmelCase : Optional[int] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
        UpperCAmelCase : Any = label_list
        UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
        UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )

        def gen():
            # Yield (inputs-dict, label) pairs for Dataset.from_generator.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
                if ex_index % 10_000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # Build the tf.data pipeline with explicit output types and shapes.
        UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
            lowercase_ , (
                {
                    'example_id': tf.intaa,
                    'input_ids': tf.intaa,
                    'attention_mask': tf.intaa,
                    'token_type_ids': tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    'example_id': tf.TensorShape([] ),
                    'input_ids': tf.TensorShape([None, None] ),
                    'attention_mask': tf.TensorShape([None, None] ),
                    'token_type_ids': tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        """Return the underlying tf.data.Dataset."""
        return self.dataset

    def __len__( self : Tuple ) -> Optional[Any]:
        return len(self.features )

    def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
        return self.features[i]

    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        """Return the (possibly RoBERTa-reordered) label list."""
        return self.label_list
class A_ ( _snake_case ):
    """DataProcessor for the HANS heuristics TSV files.

    NOTE(review): method names are machine-mangled; by body they are
    get_train_examples, get_dev_examples, get_labels and _create_examples --
    confirm.
    """

    def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
        """Read the training split from heuristics_train_set.txt."""
        return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )

    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
        """Read the evaluation split from heuristics_evaluation_set.txt."""
        return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )

    def UpperCAmelCase_ ( self : str ) -> Optional[int]:
        """Return the three NLI labels."""
        return ["contradiction", "entailment", "neutral"]

    def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
        # Build InputExample objects from raw TSV rows; the header row (i == 0)
        # is skipped.
        UpperCAmelCase : Union[str, Any] = []
        for i, line in enumerate(lowercase_ ):
            if i == 0:
                continue
            UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
            UpperCAmelCase : Tuple = line[5]
            UpperCAmelCase : Dict = line[6]
            # Column 7 holds the label, sometimes prefixed with "ex".
            UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
            UpperCAmelCase : Optional[Any] = line[0]
            examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
        return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
    # Convert InputExamples into tokenized InputFeatures.
    # NOTE(review): parameter names are machine-mangled (and duplicated, which
    # is not valid Python); original signature was (examples, label_list,
    # max_length, tokenizer) -- confirm against the upstream HANS example.
    UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
    UpperCAmelCase : Optional[Any] = []
    for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        UpperCAmelCase : int = tokenizer(
            example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
        # Unknown labels fall back to index 0.
        UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
        UpperCAmelCase : Any = int(example.pairID )
        features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
    # Log the first few converted examples for debugging.
    for i, example in enumerate(examples[:5] ):
        logger.info('*** Example ***' )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
# Number of output labels per task.
lowercase__ = {
    "hans": 3,
}
# Task name -> DataProcessor implementation.
lowercase__ = {
    "hans": HansProcessor,
}
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_a ):
    """Return the Euclidean distance between two equal-length vectors.

    Bug fix: the original declared the same parameter name twice, which is a
    SyntaxError in Python; the second parameter is renamed so that the two
    input vectors are actually distinct.
    """
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_a ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    # Nearest-neighbour search: for every vector in the second array find the
    # closest vector in the first (Euclidean distance) and return
    # [[nearest_vector_as_list, distance], ...].
    # NOTE(review): parameter names are machine-mangled (and duplicated, which
    # is not valid Python); original was similarity_search(dataset,
    # value_array).  The body also calls ``euclidean`` and reads ``dataset`` /
    # ``value_array`` directly -- names this mangled module never binds under
    # those spellings; left byte-identical rather than guessing the mapping.
    if dataset.ndim != value_array.ndim:
        UpperCAmelCase : str = (
            'Wrong input data\'s dimensions... '
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(UpperCAmelCase_ )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            UpperCAmelCase : str = (
                'Wrong input data\'s shape... '
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(UpperCAmelCase_ )
    except IndexError:
        # 1-D inputs have no second shape axis; only rank mismatch is fatal.
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        UpperCAmelCase : List[str] = (
            'Input data have different datatype... '
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(UpperCAmelCase_ )
    UpperCAmelCase : str = []
    for value in value_array:
        # Start with the first dataset vector, then keep the running minimum.
        UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
        UpperCAmelCase : Tuple = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
            if dist > temp_dist:
                UpperCAmelCase : List[str] = temp_dist
                UpperCAmelCase : str = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_a ):
    """Return the cosine similarity of two vectors.

    Bug fix: the original declared the same parameter name twice, which is a
    SyntaxError in Python and would only ever compare a vector with itself;
    the second parameter is renamed so both inputs are used.
    """
    return np.dot(UpperCAmelCase_ , UpperCAmelCase_a ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_a ))
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 695 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    """Deprecated backwards-compatibility shim: warns that
    LayoutLMv2FeatureExtractor is superseded by LayoutLMv2ImageProcessor and
    forwards construction to the base class.

    NOTE(review): the second argument to warnings.warn is the mangled name
    ``lowercase_`` -- presumably FutureWarning in the original; confirm.
    """

    def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
        super().__init__(*lowercase_ , **lowercase_ )
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Paths to the test fixture files/directories used throughout these tests.
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
    """Unit tests for AutoFeatureExtractor loading, registration and
    trust_remote_code behaviour.

    NOTE(review): method and variable names are machine-mangled; each test's
    intent below is documented from its body only.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        """Per-test setup: initialise a counter (not read elsewhere here)."""
        UpperCAmelCase : Optional[Any] = 0

    def UpperCAmelCase_ ( self : List[Any] ) -> Any:
        """Load a feature extractor from a hub model id."""
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
        """Load a feature extractor from a local config path."""
        UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
        """config.json alone (without feature_extractor_type) must suffice to
        load the processor locally, and private state must not be saved."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase : Any = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
            config_dict.pop('feature_extractor_type' )
            UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
            # save in new folder
            model_config.save_pretrained(lowercase_ )
            config.save_pretrained(lowercase_ )
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
            # make sure private variable is not incorrectly saved
            UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        """Load from a standalone feature-extractor config file."""
        UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        """An invalid model identifier raises a helpful error."""
        with self.assertRaisesRegex(
            lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        """An invalid git revision raises a helpful error."""
        with self.assertRaisesRegex(
            lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        """A repo without preprocessor_config.json raises a helpful error."""
        with self.assertRaisesRegex(
            lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )

    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        """trust_remote_code gating for dynamic feature extractors, plus a
        save/load round-trip of the remotely loaded extractor."""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowercase_ )
            UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        """Custom config/extractor registration works, double registration
        raises, and the global registries are cleaned up afterwards."""
        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase_ ):
                AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowercase_ )
                UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
                self.assertIsInstance(lowercase_ , lowercase_ )
        finally:
            # Undo registration so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def UpperCAmelCase_ ( self : int ) -> Tuple:
        """Locally registered extractors take precedence unless remote code is
        explicitly trusted."""
        class A_ ( _snake_case ):
            """Local stub extractor flagged as local-only."""
            UpperCAmelCase_ : Union[str, Any] = True

        try:
            AutoConfig.register('custom' , lowercase_ )
            AutoFeatureExtractor.register(lowercase_ , lowercase_ )
            # If remote code is not set, the default is to use local
            UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
        finally:
            # Undo registration so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A_ ( _snake_case ):
    """Trainer subclass for QA-style evaluation: runs the prediction loop
    with metric computation deferred, then post-processes raw predictions
    before computing and logging metrics.

    NOTE(review): identifiers are machine-mangled; this matches the
    QuestionAnsweringTrainer pattern from the transformers examples, and the
    repeated local rebinding in the methods suggests the originals assigned
    distinct names (eval_dataset, output, metrics, ...) -- confirm.
    """

    def __init__( self : Dict , *lowercase_ : List[str] , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , **lowercase_ : Any ) -> Optional[int]:
        super().__init__(*lowercase_ , **lowercase_ )
        # Keep the raw eval examples and the post-processing hook for later.
        UpperCAmelCase : Any = eval_examples
        UpperCAmelCase : Tuple = post_process_function

    def UpperCAmelCase_ ( self : List[Any] , lowercase_ : List[Any]=None , lowercase_ : Dict=None , lowercase_ : Optional[int]=None , lowercase_ : str = "eval" ) -> Any:
        """Evaluate: run the eval loop without metrics, post-process the
        predictions, then compute, prefix and log the metrics."""
        UpperCAmelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCAmelCase : List[str] = self.get_eval_dataloader(lowercase_ )
        UpperCAmelCase : int = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCAmelCase : List[str] = self.compute_metrics
        UpperCAmelCase : List[Any] = None
        UpperCAmelCase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCAmelCase : List[str] = time.time()
        try:
            UpperCAmelCase : List[str] = eval_loop(
                lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
        finally:
            # Always restore the metric hook, even if the loop raises.
            UpperCAmelCase : Any = compute_metrics
        UpperCAmelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
        # Exclude JIT compilation time from the speed metrics.
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            UpperCAmelCase : Dict = self.post_process_function(lowercase_ , lowercase_ , output.predictions )
            UpperCAmelCase : Optional[int] = self.compute_metrics(lowercase_ )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    UpperCAmelCase : Tuple = metrics.pop(lowercase_ )
            metrics.update(output.metrics )
        else:
            UpperCAmelCase : Optional[Any] = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(lowercase_ )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        UpperCAmelCase : Union[str, Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
        return metrics

    def UpperCAmelCase_ ( self : Tuple , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : str=None , lowercase_ : str = "test" ) -> Optional[int]:
        """Predict on a test set with the same deferred-metrics flow; returns
        a PredictionOutput with post-processed predictions and metrics."""
        UpperCAmelCase : Dict = self.get_test_dataloader(lowercase_ )
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCAmelCase : List[Any] = self.compute_metrics
        UpperCAmelCase : Any = None
        UpperCAmelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCAmelCase : Union[str, Any] = time.time()
        try:
            UpperCAmelCase : int = eval_loop(
                lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
        finally:
            # Always restore the metric hook, even if the loop raises.
            UpperCAmelCase : Dict = compute_metrics
        UpperCAmelCase : Dict = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        # Without a post-processor or metric function, return the raw output.
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        UpperCAmelCase : Optional[Any] = self.post_process_function(lowercase_ , lowercase_ , output.predictions , 'predict' )
        UpperCAmelCase : Tuple = self.compute_metrics(lowercase_ )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                UpperCAmelCase : Optional[Any] = metrics.pop(lowercase_ )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source of an Instagram Video/IGTV URL and download it.

    Uses the downloadgram.net resolver API to obtain the direct ``src`` URL,
    then fetches and returns the raw video bytes.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # First request resolves the share URL to the direct media URL.
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    # Timestamped output name avoids clobbering previous downloads.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 695 | 1 |
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place using stooge sort and return it.

    Stooge sort runs in O(n^(log 3 / log 1.5)) ≈ O(n^2.71); it is of purely
    educational interest.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    """Recursively stooge-sort the slice ``arr[i..h]`` (both bounds inclusive)."""
    if i >= h:
        return
    # If the first element is larger than the last, swap them.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the range:
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Sort first 2/3, then last 2/3, then first 2/3 again.
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 695 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of perimeters of almost-equilateral triangles.

    Sums the perimeters of all triangles with integer sides of the form
    (a, a, a±1) and integral area whose perimeter does not exceed
    ``max_perimeter``. The candidates are generated iteratively from the
    underlying Pell-like recurrence rather than by brute force.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the recurrence to the next valid triangle.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 695 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime, via 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 1_00_01) -> int:
    """Project Euler 7: return the ``nth`` prime number.

    Raises:
        TypeError: if ``nth`` cannot be cast to int.
        ValueError: if ``nth`` is not positive.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_(unittest.TestCase):
    """Fast (tiny-model) tests for the unconditional latent diffusion pipeline."""

    @property
    def dummy_uncond_unet(self):
        # Tiny UNet so the test runs quickly on CPU.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        # Same seed again so the tuple-return path must reproduce the same image.
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # mps kernels are less deterministic, so relax the tolerance there.
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class A_(unittest.TestCase):
    """Integration test against the full pretrained CompVis LDM checkpoint."""

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447])
        # mps kernels are less deterministic, so relax the tolerance there.
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 695 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowercase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the HF module attribute addressed by dotted ``key``.

    ``weight_type`` selects which tensor of the resolved module is written
    (``weight``/``weight_g``/``weight_v``/``bias``) or, when None, the pointer
    itself. Asserts that the destination shape matches ``value``.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy the fairseq wav2vec2 encoder weights into the HF model.

    Returns the encoder->decoder projection module found under the ``proj``
    prefix (or None if the checkpoint has no projection). Unmapped weights are
    logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    ``type_id`` 0 addresses the conv weights/biases; ``type_id`` 2 addresses the
    layer norm (only layer 0 when group norm is used). Anything else is recorded
    in ``unused_weights``.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` whose weight shares the embedding matrix data.

    Used to tie an LM output head to an input embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Replacing .data makes the layer reuse the embedding's weight tensor values.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id mapping from a fairseq ``dict.txt`` file.

    The four special tokens get the first ids; file tokens (first whitespace
    field of each line) are numbered from 4 in file order.
    """
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak a fairseq wav2vec2+decoder checkpoint into the HF design.

    Loads the fairseq model, transfers encoder, decoder and projection weights,
    writes the tokenizer vocab, and saves the assembled encoder-decoder model,
    tokenizer and feature extractor under ``pytorch_dump_folder_path``.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_60_00,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the checkpoint conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_(unittest.TestCase):
    """Fast (tiny-model) tests for the Karras-Ve pipeline."""

    @property
    def dummy_uncond_unet(self):
        # Tiny UNet so the test runs quickly on CPU.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
        # Same seed again so the tuple-return path must reproduce the same image.
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class A_(unittest.TestCase):
    """Integration test against the pretrained ncsnpp CelebA-HQ checkpoint."""

    def test_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase__ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(UpperCAmelCase_ ) , version.parse(UpperCAmelCase_ ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version(requirement, hint=None):
    """Check that an installed package satisfies a pip-style requirement string.

    ``requirement`` is either a bare package name (existence check only) or a
    name plus comma-separated version specifiers, e.g. ``"tokenizers>=0.11,<0.13"``.
    ``"python"`` is special-cased against the running interpreter. ``hint`` is
    appended to error messages.
    """
    hint = f"""\n{hint}""" if hint is not None else ''
    # non-versioned check
    if re.match(R'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                f""" got {requirement}"""
            )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    f""" but got {requirement}"""
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys())}, but got {op}""")
    # special case
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}"""
        )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """``require_version`` wrapper that adds a core-dependency-specific install hint."""
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(requirement, hint)
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_(PretrainedConfig):
    """Configuration for the Autoformer time-series transformer model.

    Holds the time-series-specific settings (prediction/context lengths, static
    and dynamic feature counts, cardinalities), the standard encoder-decoder
    transformer hyper-parameters, and the Autoformer-specific decomposition
    settings (label length, moving average, autocorrelation factor).
    """

    model_type = """autoformer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],  # NOTE: mutable default kept for config compat; it is only read
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`'
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`'
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: roughly half the cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Total width of the per-timestep feature vector fed to the model.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 695 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowercase__ = pytest.mark.integration
@require_faiss
class A_(TestCase):
    """Tests for the FAISS / Elasticsearch index helpers on `Dataset`."""

    def _create_dummy_dataset(self):
        # 30 rows named my_name-train_0 ... my_name-train_29.
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        # Row 29 has the largest vector, hence the largest inner product.
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs'
        )
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class A_(_snake_case):
    """Unit tests for the low-level FaissIndex wrapper (search, factory,
    custom index, serialization)."""

    def UpperCAmelCase_(self) -> None:
        """Flat inner-product index: single and batched queries."""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query: e_1 has maximal inner product with stored row 1
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        # a 2-D array is not a valid single query
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries: reversed identity rows -> best matches 4,3,2,1,0
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        # a 1-D array is not a valid batch of queries
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def UpperCAmelCase_(self) -> None:
        """String-factory construction, and factory + custom_index being exclusive."""
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # NOTE(review): the raised type was the undefined name `lowercase_`;
        # passing both string_factory and custom_index raises ValueError upstream.
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def UpperCAmelCase_(self) -> None:
        """Wrapping a pre-built faiss index via custom_index."""
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def UpperCAmelCase_(self) -> None:
        """Round-trip the index through save/load and search the loaded copy."""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def UpperCamelCase(mockfs):
    """Serialize a FaissIndex to a (mock) fsspec filesystem and load it back.

    NOTE(review): the parameter had been renamed away from `mockfs` while the
    body still referenced `mockfs` (a NameError) — and as a pytest fixture the
    parameter *must* be named `mockfs`; restored it.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = 'index.faiss'
    path = f"""mock://{index_name}"""
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class A_(_snake_case):
    """Unit tests for the ElasticSearchIndex wrapper with fully mocked ES calls."""

    def UpperCAmelCase_(self) -> None:
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            # NOTE(review): every assignment target in this method had been lost;
            # restored the names the later statements reference.
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query — the mock always reports document _id 0 with score 1
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 695 |
'''simple docstring'''
def UpperCamelCase(sentence, ngram_size):
    """Return all contiguous character n-grams of length `ngram_size` from
    `sentence`, in order of appearance.  Returns [] when the sentence is
    shorter than `ngram_size`.

    >>> UpperCamelCase('abcde', 2)
    ['ab', 'bc', 'cd', 'de']
    """
    # NOTE(review): both parameters were named `UpperCAmelCase_` (a duplicate-
    # argument SyntaxError) while the body referenced `sentence`/`ngram_size`;
    # restored the names the body already uses.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 695 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Map of canonical RoBERTa checkpoint names to their hosted config.json URLs.
# NOTE(review): this rebinds `lowercase__`, shadowing the logger created on the
# previous line — looks like an automated-rename artifact; nothing in view
# reads the logger afterwards, but confirm before relying on it.
lowercase__ = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class A_(_snake_case):
    """RoBERTa model configuration.

    NOTE(review): all 18 `__init__` parameters had been renamed to the same
    identifier `lowercase_` (a duplicate-argument SyntaxError) while the body
    referenced the real names; restored the standard RoBERTa parameter names
    in the order implied by the defaults (pad=1, bos=0, eos=2 are the RoBERTa
    token-id conventions).
    """

    UpperCAmelCase_: str = """roberta"""

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_(_snake_case):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def UpperCAmelCase_(self) -> Mapping[str, Mapping[int, str]]:
        # NOTE(review): `dynamic_axis` was referenced but never bound (its
        # assignments had lost their target name); restored the binding.
        if self.task == "multiple-choice":
            # multiple-choice inputs carry an extra `choice` axis between batch and sequence
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (RBJ Audio EQ Cookbook).

    frequency:  cutoff frequency in Hz
    samplerate: sampling rate in Hz
    q_factor:   filter Q; defaults to 1/sqrt(2) (Butterworth response)

    NOTE(review): the parameters were all named `UpperCAmelCase_` (a duplicate-
    argument SyntaxError) and the a/b coefficient bindings had been destroyed;
    restored the names the body and the set_coefficients call reference.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # low-pass: numerator is symmetric, so the b-coefficients are [b0, b1, b0]
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook).

    frequency:  cutoff frequency in Hz
    samplerate: sampling rate in Hz
    q_factor:   filter Q; defaults to 1/sqrt(2) (Butterworth response)

    NOTE(review): restored the parameter and coefficient names destroyed by
    automated renaming (duplicate `UpperCAmelCase_` params were a SyntaxError).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # high-pass: numerator is symmetric, so the b-coefficients are [b0, b1, b0]
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter (RBJ Audio EQ Cookbook).

    frequency:  center frequency in Hz
    samplerate: sampling rate in Hz
    q_factor:   filter Q; defaults to 1/sqrt(2)

    NOTE(review): restored the parameter and coefficient names destroyed by
    automated renaming (duplicate `UpperCAmelCase_` params were a SyntaxError).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook).

    frequency:  frequency of maximal phase shift, in Hz
    samplerate: sampling rate in Hz
    q_factor:   filter Q; defaults to 1/sqrt(2)

    NOTE(review): restored the parameter and coefficient names destroyed by
    automated renaming (duplicate `UpperCAmelCase_` params were a SyntaxError).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # all-pass: the denominator is the numerator reversed
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook).

    frequency:  center frequency in Hz
    samplerate: sampling rate in Hz
    gain_db:    peak gain in decibels (negative for a cut)
    q_factor:   filter Q; defaults to 1/sqrt(2)

    NOTE(review): restored the parameter and coefficient names destroyed by
    automated renaming (duplicate `UpperCAmelCase_` params were a SyntaxError).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from the dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook).

    frequency:  shelf corner frequency in Hz
    samplerate: sampling rate in Hz
    gain_db:    shelf gain in decibels (negative for a cut)
    q_factor:   filter Q; defaults to 1/sqrt(2)

    NOTE(review): restored the parameter and intermediate names destroyed by
    automated renaming (duplicate `UpperCAmelCase_` params were a SyntaxError);
    the surviving references (pmc/ppmc/mpc/pmpc/aaa/big_a) pin the bindings.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from the dB gain
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def UpperCamelCase(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook).

    frequency:  shelf corner frequency in Hz
    samplerate: sampling rate in Hz
    gain_db:    shelf gain in decibels (negative for a cut)
    q_factor:   filter Q; defaults to 1/sqrt(2)

    NOTE(review): restored the parameter and intermediate names destroyed by
    automated renaming (duplicate `UpperCAmelCase_` params were a SyntaxError);
    the surviving references (pmc/ppmc/mpc/pmpc/aaa/big_a) pin the bindings.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from the dB gain
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 695 | 1 |
'''simple docstring'''
from .imports import is_rich_available
# Runs at import time: when the optional `rich` package is available, install
# rich's pretty traceback handler globally (show_locals=False keeps local
# variables out of printed tracebacks); otherwise fail the import with an
# actionable install hint.
if is_rich_available():
    from rich.traceback import install
    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class A_(Generic[T]):
    """Least-recently-used key tracker: a deque ordered most→least recent plus
    a set for O(1) membership tests.

    NOTE(review): automated renaming had destroyed the attribute names
    (`dq_store`, `key_reference`, `_MAX_CAPACITY`), the `T` binding, and the
    `refer`/`display` method names that the __main__ block below calls;
    restored them so the in-file assertion at the bottom passes.
    """

    dq_store: deque[T]  # cache store of keys, most recently used at the left
    key_reference: set[T]  # membership set mirroring dq_store
    _MAX_CAPACITY: int = 10  # default maximum capacity of the cache

    def __init__(self, n: int) -> None:
        """n: cache capacity; a falsy n means effectively unbounded; n < 0 raises."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # capacity is stored per instance (avoids leaking across instances)
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to key x, evicting the least recently used key
        when the cache is full; an already-cached key is moved to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = A_(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 1 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.