import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))

if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
lowercase_ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowercase_ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowercase_ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            # newer nltk releases expect pre-tokenized input
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Union[str, Any] = logging.get_logger(__name__)
A: Optional[int] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : str = 'nllb-moe'
__lowerCAmelCase : List[Any] = ['past_key_values']
__lowerCAmelCase : Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=128112 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="float32" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE="all" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=0.2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : str = d_model
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Any = dropout
UpperCAmelCase : Optional[int] = attention_dropout
UpperCAmelCase : Union[str, Any] = activation_dropout
UpperCAmelCase : Dict = activation_function
UpperCAmelCase : int = init_std
UpperCAmelCase : List[Any] = encoder_layerdrop
UpperCAmelCase : Optional[Any] = decoder_layerdrop
UpperCAmelCase : str = use_cache
UpperCAmelCase : List[Any] = encoder_layers
UpperCAmelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Optional[Any] = router_z_loss_coef
UpperCAmelCase : List[str] = router_aux_loss_coef
UpperCAmelCase : str = decoder_sparse_step
UpperCAmelCase : str = encoder_sparse_step
UpperCAmelCase : Optional[int] = num_experts
UpperCAmelCase : Optional[int] = expert_capacity
UpperCAmelCase : List[Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
UpperCAmelCase : int = router_dtype
UpperCAmelCase : Optional[int] = router_ignore_padding_tokens
UpperCAmelCase : Tuple = batch_prioritized_routing
UpperCAmelCase : Any = second_expert_policy
UpperCAmelCase : List[str] = normalize_router_prob_before_dropping
UpperCAmelCase : str = moe_eval_capacity_token_fraction
UpperCAmelCase : Union[str, Any] = moe_token_dropout
UpperCAmelCase : Any = output_router_logits
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
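# Illustrative check (not in the original file): the dynamic-programming result
# agrees with Python's built-in math.comb, e.g. C(10, 5) == 252.
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252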
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # use the script's directory so the relative image path resolves
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
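# Vectorized sketch (illustrative, not part of the original class): the loop in
# `stretch` builds the classic histogram-equalization transfer function
# s_k = (L - 1) * P(pixel <= k), which NumPy expresses directly via a CDF.
demo_img = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)  # stand-in for a grayscale image
hist, _ = np.histogram(demo_img.ravel(), bins=256, range=(0, 256))
cdf = hist.cumsum() / hist.sum()
mapping = np.round(255 * cdf).astype(np.uint8)
equalized = mapping[demo_img]  # same per-pixel remapping as the nested loops above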
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
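# Worked example (illustrative, not in the original file): for 152 the three
# one-digit deletions give 52, 12 and 15, so the maximum is 52.
assert remove_digit(152) == 52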
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
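# Behavior sketch (illustrative, not part of the original file): with this
# pattern, `import transformers.models.pegasus_x` stays cheap, and the
# torch-backed names registered in `_import_structure` are only imported on
# first attribute access, e.g.:
#
#     from transformers.models.pegasus_x import PegasusXConfig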
a_ : Union[str, Any] = "Tobias Carryer"
from time import time
class UpperCamelCase :
def __init__( self : Optional[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int]=int(time() ) ): # noqa: B008
"""simple docstring"""
SCREAMING_SNAKE_CASE = multiplier
SCREAMING_SNAKE_CASE = increment
SCREAMING_SNAKE_CASE = modulo
SCREAMING_SNAKE_CASE = seed
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
a_ : List[Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
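# Note (illustrative, not in the original file): 1664525 and 1013904223 are the
# multiplier/increment popularized by "Numerical Recipes"; with modulo 2 << 31
# (i.e. 2**32) and a fixed seed, the stream is fully reproducible:
#
#     lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
#     lcg.next_number()  # always 1013904223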
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = analyze_text(__lowerCamelCase )
_lowerCAmelCase = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowerCAmelCase = sum(single_char_strings.values() )
# one length string
_lowerCAmelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowerCAmelCase = single_char_strings[ch]
_lowerCAmelCase = my_str / all_sum
my_fir_sum += prob * math.loga(__lowerCamelCase ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowerCAmelCase = sum(two_char_strings.values() )
_lowerCAmelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowerCAmelCase = cha + cha
if sequence in two_char_strings:
_lowerCAmelCase = two_char_strings[sequence]
_lowerCAmelCase = int(__lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(__lowerCamelCase )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = Counter() # type: ignore
_lowerCAmelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(__lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
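# Illustrative usage (not in the original file): analyze_text() returns raw
# unigram and bigram Counters; e.g. in "abb cbb" the character "b" occurs
# four times (three counted in the main loop plus the final character).
single, double = analyze_text("abb cbb")
assert single["b"] == 4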
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
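# Quick example (illustrative, not in the original file): the first five terms
# of the p = 2 series.
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]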
def mean_absolute_deviation(nums: list) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
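# Worked example (illustrative, not in the original file): for [1, 2, 3, 4] the
# mean is 2.5 and the absolute deviations are 1.5, 0.5, 0.5, 1.5, so the mean
# absolute deviation is 4 / 4 = 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0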
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
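# Worked example (illustrative, not in the original file): for p = 5 we get
# m = 2**5 - 1 = 31 and the sequence 4 -> 14 -> 194 % 31 = 8 -> 62 % 31 = 0,
# so 31 is a Mersenne prime.
print(lucas_lehmer_test(5))  # True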
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ : str = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our ViLT structure.
    """
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A__ : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many ways each total can be rolled with `dice_number` fair dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Project Euler problem 205: probability that Peter (nine four-sided dice)
    rolls a strictly higher total than Colin (six six-sided dice).
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives identical results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    # implemented only because Lightning requires a forward; it is unused here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """simple docstring"""
    # load the base Longformer and wrap it in the Lightning module
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
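# Example invocation (a sketch: the script name and file paths below are
# illustrative placeholders; only the flags come from the argparse block):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./converted_longformer_qa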
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 433 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
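# Minimal usage sketch (assumes hub access; 'bert-base-uncased' is the standard
# public checkpoint and the ids show its usual [CLS] ... [SEP] framing):
# >>> tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
# >>> tok('hello world')['input_ids']
# [101, 7592, 2088, 102]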
| 433 | 1 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order pair and step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
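# Sanity checks (inputs are illustrative, not from the original module):
# >>> gnome_sort([5, 3, 8, 1])
# [1, 3, 5, 8]
# >>> gnome_sort([])
# []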
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
| 585 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    '''simple docstring'''
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        '''simple docstring'''
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    '''simple docstring'''
    def __init__(self, start, goal):
        '''simple docstring'''
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False
    def search(self) -> Path | None:
        '''simple docstring'''
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # let each frontier chase the other's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        '''simple docstring'''
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest
    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 435 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (32, 32)
SCREAMING_SNAKE_CASE : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase__ )
return image
@property
def _UpperCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def _UpperCamelCase ( self ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _UpperCamelCase ( self ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase__ )
@property
def _UpperCamelCase ( self ) -> List[Any]:
        def extract(*args, **kwargs):
            class Out:
                '''simple docstring'''
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : Dict = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_vae
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline(
unet=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , safety_checker=lowercase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : int = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Any = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE : int = torch.Generator(device=lowercase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe([prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
SCREAMING_SNAKE_CASE : Optional[int] = output.images
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=lowercase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowercase__ , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : int = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : Dict = PNDMScheduler(skip_prk_steps=lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_vae
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline(
unet=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , safety_checker=lowercase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=lowercase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe([prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
SCREAMING_SNAKE_CASE : Any = output.images
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=lowercase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowercase__ , )[0]
SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowercase__ )
assert isinstance(lowercase__ , lowercase__ )
assert isinstance(pipe.scheduler , lowercase__ )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE : int = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(lowercase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE : Optional[int] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : List[str] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : int = PNDMScheduler(skip_prk_steps=lowercase__ )
SCREAMING_SNAKE_CASE : Any = self.dummy_vae
SCREAMING_SNAKE_CASE : List[str] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
SCREAMING_SNAKE_CASE : Optional[int] = unet.half()
SCREAMING_SNAKE_CASE : Tuple = vae.half()
SCREAMING_SNAKE_CASE : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionPipeline(
unet=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , safety_checker=lowercase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Dict = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE : Dict = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowercase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
SCREAMING_SNAKE_CASE : str = 4_003_660_346
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=lowercase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : int = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=lowercase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[str] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowercase__ )
SCREAMING_SNAKE_CASE : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
SCREAMING_SNAKE_CASE : int = 2_734_971_755
SCREAMING_SNAKE_CASE : List[str] = 7
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=lowercase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Tuple = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE : int = torch.manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=lowercase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[int] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Dict = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_044_355_234
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=lowercase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(
[prompt] , generator=lowercase__ , guidance_scale=lowercase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE : int = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : str = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 179 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    '''simple docstring'''
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
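# Quick illustration (assumes transformers and torch are installed; GELU(1.0)
# is approximately 0.8413 under the exact/erf formulation):
# >>> act = get_activation('gelu')
# >>> act(torch.tensor([1.0]))
# tensor([0.8413])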
| 179 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """simple docstring"""
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=1_3 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_2 , SCREAMING_SNAKE_CASE_ : Any=0.25 , SCREAMING_SNAKE_CASE_ : Dict=8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : List[str]=6 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : str="relu6" , SCREAMING_SNAKE_CASE_ : int=1_2_8_0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Tuple=None , ) -> Union[str, Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = depth_multiplier
lowercase_ = depth_divisible_by
lowercase_ = min_depth
lowercase_ = expand_ratio
lowercase_ = tf_padding
lowercase_ = output_stride
lowercase_ = first_layer_is_expansion
lowercase_ = finegrained_output
lowercase_ = hidden_act
lowercase_ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase_ = classifier_dropout_prob
lowercase_ = use_labels
lowercase_ = is_training
lowercase_ = num_labels
lowercase_ = initializer_range
lowercase_ = scope
def _lowercase ( self : int ) -> str:
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self : int ) -> Tuple:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
lowercase_ = MobileNetVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
lowercase_ = self.num_labels
lowercase_ = MobileNetVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
a :List[str] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a :Optional[Any] = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a :Optional[Any] = False
a :List[str] = False
a :List[Any] = False
a :Any = False
def _lowercase ( self : int ) -> str:
lowercase_ = MobileNetVaModelTester(self )
lowercase_ = MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def _lowercase ( self : str ) -> str:
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def _lowercase ( self : List[Any] ) -> Any:
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def _lowercase ( self : str ) -> Optional[Any]:
pass
def _lowercase ( self : Tuple ) -> int:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> List[Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowercase_ = outputs.hidden_states
lowercase_ = 1_6
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Tuple ) -> Optional[int]:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def _lowercase ( self : Dict ) -> List[str]:
lowercase_ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
lowercase_ = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase_ = model.to(SCREAMING_SNAKE_CASE_ )
lowercase_ = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase_ = prepare_img()
lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.logits
# verify the logits
lowercase_ = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=SCREAMING_SNAKE_CASE_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
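# To run just the integration tests above (a sketch: the test-file path is an
# assumption; RUN_SLOW is the standard gate for @slow tests in this suite):
#   RUN_SLOW=1 pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py -k Integration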
| 97 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
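# Worked example (illustrative grid): the cheapest monotone right/down path
# through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1 = 7.
# >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# 7
# Note: the function accumulates costs in place, mutating its argument.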
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ):
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=a_ , set_alpha_to_one=a_ , )
torch.manual_seed(0 )
a__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
a__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
a__ = CLIPTextModel(a_ )
a__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
a__ = 77
a__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , a_ , a_=0 ):
if str(a_ ).startswith("""mps""" ):
a__ = torch.manual_seed(a_ )
else:
a__ = torch.Generator(device=a_ ).manual_seed(a_ )
a__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _a ( self ):
a__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
a__ = self.get_dummy_components()
torch.manual_seed(0 )
a__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
a__ = RobertaSeriesModelWithTransformation(a_ )
a__ = text_encoder
a__ = AltDiffusionPipeline(**a_ )
a__ = alt_pipe.to(a_ )
alt_pipe.set_progress_bar_config(disable=a_ )
a__ = self.get_dummy_inputs(a_ )
a__ = """A photo of an astronaut"""
a__ = alt_pipe(**a_ )
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ):
a__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
a__ = self.get_dummy_components()
a__ = PNDMScheduler(skip_prk_steps=a_ )
torch.manual_seed(0 )
a__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
a__ = RobertaSeriesModelWithTransformation(a_ )
a__ = text_encoder
a__ = AltDiffusionPipeline(**a_ )
a__ = alt_pipe.to(a_ )
alt_pipe.set_progress_bar_config(disable=a_ )
a__ = self.get_dummy_inputs(a_ )
a__ = alt_pipe(**a_ )
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def _a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
# make sure here that pndm scheduler skips prk
a__ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=a_ )
a__ = alt_pipe.to(a_ )
alt_pipe.set_progress_bar_config(disable=a_ )
a__ = """A painting of a squirrel eating a burger"""
a__ = torch.manual_seed(0 )
a__ = alt_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ):
a__ = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
a__ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=a_ , safety_checker=a_ )
a__ = alt_pipe.to(a_ )
alt_pipe.set_progress_bar_config(disable=a_ )
a__ = """A painting of a squirrel eating a burger"""
a__ = torch.manual_seed(0 )
a__ = alt_pipe([prompt] , generator=a_ , num_inference_steps=2 , output_type="""numpy""" )
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 708 |
def greatest_common_divisor(a: int, b: int) -> int:
    """simple docstring"""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """simple docstring"""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
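# Quick checks (standard arithmetic facts, input values illustrative):
# >>> greatest_common_divisor(24, 40)
# 8
# >>> gcd_by_iterative(24, 40)
# 8
# >>> gcd_by_iterative(0, 5)
# 5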
def main():
    """simple docstring"""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 351 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1024,
    'microsoft/speecht5_tts': 1024,
    'microsoft/speecht5_vc': 1024,
}
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : str="<unk>" , __SCREAMING_SNAKE_CASE : str="<pad>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> None:
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> dict:
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is reloaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d: dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
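# Usage sketch (the tokenizer's class name is not shown above, so names here are
# illustrative only):
#   tok = SomeSentencePieceTokenizer("spiece.model")      # hypothetical vocab path
#   tok("hello world")["input_ids"]                        # ends with tok.eos_token_id
#   tok.save_vocabulary("./out")                           # copies or re-serializes spiece.model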
| 549 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : list[list[int | float]] ) -> int:
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = len(matrix[0] )
lowerCamelCase_ = min(_lowerCamelCase , _lowerCamelCase )
for row in range(_lowerCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _lowerCamelCase ):
lowerCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(_lowerCamelCase , _lowerCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowerCamelCase_ = True
for i in range(row + 1 , _lowerCamelCase ):
if matrix[i][row] != 0:
lowerCamelCase_ , lowerCamelCase_ = matrix[i], matrix[row]
lowerCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(_lowerCamelCase ):
lowerCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
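    # Worked example (sketch): the third row equals the sum of the first two,
    # so elimination zeroes it out and the rank is 2.
    print(rank_of_matrix([[1, 2, 3], [4, 5, 6], [5, 7, 9]]))  # -> 2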
| 549 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
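# Minimal illustration (hypothetical helper, mirroring flatten_yaml_as_dict above):
# nested YAML keys are joined with "." so the asserts further down can read them
# back with getattr() on the Namespace.
def _flatten_demo() -> None:
    nested = {"model": {"classification": {"name": "mobilevit_v2"}}}
    flat = {}

    def walk(d: dict, prefix: str = "") -> None:
        for key, value in d.items():
            full_key = prefix + "." + key if prefix else key
            if isinstance(value, dict):
                walk(value, full_key)
            else:
                flat[full_key] = value

    walk(nested)
    assert flat == {"model.classification.name": "mobilevit_v2"}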
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
            # the index after the last transformer layer holds the final LayerNorm
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
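# Illustration (sketch, not executed by the script): with base_model=False the
# rules above map, e.g.,
#   "layer_3.1.local_rep.0.conv.weight"
#     -> "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"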
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
        # expected_logits for base variant
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
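# Invocation sketch (file names are illustrative, not taken from the repository):
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256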
| 260 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
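def _betas_smoke_test() -> None:
    # Illustrative check (not part of the original module): the cosine schedule
    # yields one beta per timestep, each capped at max_beta.
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert bool((betas <= 0.999).all())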
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """A second-order (DPM-Solver-2 style) discrete scheduler over interpolated Karras sigmas."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
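# Usage sketch (denoising loop; `unet` is a hypothetical noise-prediction model):
# scheduler = KDPM2DiscreteScheduler()
# scheduler.set_timesteps(25, device="cpu")
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(model_input, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample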
| 260 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
A__ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(A__)  # A__ holds the README markdown assembled in the f-string above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
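# Result sketch: for "wmt19-ru-en" this writes
#   <repo_root>/model_cards/facebook/wmt19-ru-en/README.md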
| 440 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
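# Backtracking sketch: each empty cell tries digits 1-9; a dead end resets the
# cell to 0 and unwinds to the previous recursive call. The constraint checks in
# is_safe() prune most branches, so typical puzzles solve far faster than the
# exponential worst case.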
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 0 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
def snake_case__ ( cls ):
_lowerCamelCase = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def snake_case__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def snake_case__ ( self ):
_lowerCamelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id='''test-config''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def snake_case__ ( self ):
CustomConfig.register_for_auto_class()
_lowerCamelCase = CustomConfig(attribute=4_2 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
_lowerCamelCase = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 4_2 )
class ConfigTestUtils(unittest.TestCase):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCamelCase = c.n_embd + 1 # int
_lowerCamelCase = c.resid_pdrop + 1.0 # float
_lowerCamelCase = not c.scale_attn_weights # bool
_lowerCamelCase = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCamelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCamelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def snake_case__ ( self ):
_lowerCamelCase = PretrainedConfig()
_lowerCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
lowerCamelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
_lowerCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(lowerCamelCase__ )}.""" )
def snake_case__ ( self ):
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
# A mock response for an HTTP head request to emulate server down
_lowerCamelCase = mock.Mock()
_lowerCamelCase = 5_0_0
_lowerCamelCase = {}
_lowerCamelCase = HTTPError
_lowerCamelCase = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase__ ) as mock_head:
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self ):
# This test is for deprecated behavior and can be removed in v5
_lowerCamelCase = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def snake_case__ ( self ):
_lowerCamelCase = AutoConfig.from_pretrained('''bert-base-cased''' )
_lowerCamelCase = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCamelCase = ['''config.42.0.0.json''']
_lowerCamelCase = 7_6_8
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCamelCase__ , '''config.42.0.0.json''' ) )
_lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def snake_case__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCamelCase = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
_lowerCamelCase = '''v4.0.0'''
_lowerCamelCase , _lowerCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCamelCase = '''v3.0.0'''
_lowerCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 623 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
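# Usage sketch: `pytest tests --make-reports=<run_id>` makes the hook above call
# pytest_terminal_summary_main, which writes per-category report files.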
| 623 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    """Compute gcd(a, b) with the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Compute gcd(a, b) recursively via gcd(a, b) == gcd(b, a % b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
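# Quick self-check (illustrative): both variants agree, and gcd(48, 18) == 6.
assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6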
def main() -> None:
    """Print gcd results for a few sample pairs using both implementations."""
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 611 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return F"gaussian_noise_s={seed}_shape={'_'.join([str(snake_case__ ) for s in shape] )}.npy"
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self : int , snake_case__ : List[str]=0 , snake_case__ : int=(4, 4, 64, 64) , snake_case__ : Union[str, Any]=False ):
"""simple docstring"""
__lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
return image
def UpperCAmelCase__ ( self : str , snake_case__ : Any=False , snake_case__ : List[Any]="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
__lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__lowerCAmelCase = "bf16" if fpaa else None
__lowerCAmelCase , __lowerCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
snake_case__ , subfolder="unet" , dtype=snake_case__ , revision=snake_case__ )
return model, params
def UpperCAmelCase__ ( self : Any , snake_case__ : Tuple=0 , snake_case__ : Dict=(4, 77, 768) , snake_case__ : List[str]=False ):
"""simple docstring"""
__lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Dict , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case__ )
__lowerCAmelCase = self.get_latents(snake_case__ , fpaa=snake_case__ )
__lowerCAmelCase = self.get_encoder_hidden_states(snake_case__ , fpaa=snake_case__ )
__lowerCAmelCase = model.apply(
{"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample
assert sample.shape == latents.shape
__lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__lowerCAmelCase = jnp.array(snake_case__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case__ , snake_case__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case__ )
__lowerCAmelCase = self.get_latents(snake_case__ , shape=(4, 4, 96, 96) , fpaa=snake_case__ )
__lowerCAmelCase = self.get_encoder_hidden_states(snake_case__ , shape=(4, 77, 1_024) , fpaa=snake_case__ )
__lowerCAmelCase = model.apply(
{"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample
assert sample.shape == latents.shape
__lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__lowerCAmelCase = jnp.array(snake_case__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case__ , snake_case__ , atol=1E-2 )
| 611 | 1 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowercase__ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
lowercase__ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
lowercase__ = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
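# Usage sketch (mirrors the docstring examples above):
# metric = datasets.load_metric("spearmanr")
# metric.compute(references=[1, 2, 3], predictions=[3, 2, 1])  # {'spearmanr': -1.0}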
| 276 |
'''simple docstring'''
from manim import *


class __SCREAMING_SNAKE_CASE(Scene):
    # NOTE: the layout/colour constants (UP, DOWN, LEFT, RIGHT, YELLOW) and the
    # targets of the three Create() calls were lost in extraction; the values
    # below are plausible reconstructions, not guaranteed originals.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1), )

        step_a = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=2.5), Write(key), Write(key_text))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
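

# To render the scene above with the manim CLI (a sketch; substitute the real
# file name):
#   manim -pql this_script.py __SCREAMING_SNAKE_CASE
# -p previews the result, -ql renders at low quality for quick iteration.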
| 276 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class __snake_case(HashTable):
    # Method names follow the hooks of the accompanying `HashTable` base class;
    # `_collision_resolution` is confirmed by the `super()` call, while
    # `_set_value` and `balanced_factor` are assumptions about that module.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Chain colliding values for the same slot in a deque (separate chaining).
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
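

# A hedged usage sketch; the constructor arguments and `insert_data` method are
# assumptions about the accompanying hash_table module:
#   table = __snake_case(3, charge_factor=2)
#   for value in (17, 18, 99):
#       table.insert_data(value)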
| 281 |
def _a(num: int) -> bool:
    """
    Return True if ``num`` reads the same backwards (a numeral palindrome).

    >>> _a(121)
    True
    >>> _a(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
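
# The check reverses the decimal digits arithmetically: for 121, rev_num grows
# 0 -> 1 -> 12 -> 121 and matches the saved copy, so 121 is a palindrome.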
| 681 | 0 |
'''simple docstring'''
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class __magic_name__(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class __magic_name__Integration(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
'''simple docstring'''
def apply_table(inp: str, table: list[int]) -> str:
    """Permute the bits of ``inp`` according to the 1-indexed ``table``."""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings."""
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Look up ``data`` in S-box ``s``: outer bits pick the row, inner bits the column."""
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    # NOTE: distinct names (p8/p10/p4 tables, s0/s1, key1/key2) are restored
    # here; extraction had collapsed several of them into a single identifier.
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
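
# S-DES in brief: the 10-bit key is permuted (p10) and shifted to derive two
# 8-bit subkeys (p8); the 8-bit block then passes through IP, two Feistel
# rounds keyed by key1/key2 (expansion, XOR, the two 4x4 S-boxes, p4), a
# half-swap between rounds, and finally IP_inv. Decryption runs the same
# rounds with the subkeys in reverse order, as the block above shows.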
| 145 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """simple docstring"""

    # Attribute names reconstructed from the standard TF tester layout.
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3 )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # The three boolean switch names below were anonymised in extraction and
    # are reconstructed from the usual TF test-suite flags.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
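

# These slow tests hit the 564M checkpoint; a typical invocation (sketch) is:
#   RUN_SLOW=1 python -m pytest tests/models/xglm/test_modeling_tf_xglm.py -k generate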
| 104 | '''simple docstring'''
from __future__ import annotations


def get_test_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small undirected test graphs as an adjacency dict."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """
    Return the bridges of an undirected graph (Tarjan's low-link method).

    >>> compute_bridges(get_test_graph(0))
    [(3, 4), (2, 3), (2, 5)]
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod() | 523 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
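# Example: with random_slice == 2, crossover("AAAA", "BBBB") yields ("AABB", "BBAA").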
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # Replace one randomly chosen gene (the index expression was lost in
        # extraction and is reconstructed here).
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
) | 703 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        """simple docstring"""
        # Kept without the `test_` prefix, mirroring the RoBERTa test this file
        # is adapted from; the boolean flag below is a reconstruction.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )
    @slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        """simple docstring"""
        # Intentionally overridden as a no-op for this tokenizer.
        pass
    def test_embeded_special_tokens(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        """simple docstring"""
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        """simple docstring"""
        # The boolean combinations below were anonymised in extraction and are
        # reconstructed from the expected offsets asserted in each case.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) | 46 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    '''simple docstring'''
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    '''simple docstring'''
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    '''simple docstring'''
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 46 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'''{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes)), f'''{gathered_obj} != {list(range(state.num_processes))}'''


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, '''sum''')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'''{reduced_tensor} != {truth_tensor}'''


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, '''mean''')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'''{reduced_tensor} != {truth_tensor}'''


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f'''State: {state}''')

    state.print('''testing gather''')
    test_gather(state)
    state.print('''testing gather_object''')
    test_gather_object(state)
    state.print('''testing broadcast''')
    test_broadcast(state)
    state.print('''testing pad_across_processes''')
    test_pad_across_processes(state)
    state.print('''testing reduce_sum''')
    test_reduce_sum(state)
    state.print('''testing reduce_mean''')
    test_reduce_mean(state)
if __name__ == "__main__":
main()
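
# Typical multi-process launch for the checks above (a sketch; the script name
# is illustrative):
#   accelerate launch --num_processes 2 test_ops.py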
| 224 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    '''simple docstring'''
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 476 | #
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    '''solves the multi-process interleaved print problem by locking on this file'''
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise | 476 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's token set."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class A__ :
'''simple docstring'''
def __init__( self : Optional[int] , *,
_SCREAMING_SNAKE_CASE : float = 0.8_5 , ):
"""simple docstring"""
UpperCamelCase = duplication_jaccard_threshold
UpperCamelCase = NUM_PERM
UpperCamelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
UpperCamelCase = defaultdict(_SCREAMING_SNAKE_CASE )
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index. If it collides with an existing key, record it
        in the duplicate clusters instead of inserting it again."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: compute the MinHash of each snippet, then feed
    the MinHashes into a DuplicationIndex (MinHash LSH)."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one "extreme" element per group of near-duplicates inside a cluster.

    Uses the module-level _shared_dataset so the dataset is not pickled into
    every worker process."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset and return it together with the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
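

# A minimal usage sketch (an illustration, not part of the original module); it
# assumes a dataset with "content", "repo_name" and "path" columns, as produced
# by the codeparrot preprocessing pipeline.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
    ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(f"Kept {len(ds_filter)} of {len(ds)} files after near-deduplication")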
| 280 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq checkpoint and massage its state dict into HF format."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the metaseq weights into our OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
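
    # A hypothetical invocation (the script filename and paths below are
    # placeholders, not from the original file):
    #
    #   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fairseq_path ./opt-350m/restored.pt \
    #       --pytorch_dump_folder_path ./opt-350m-hf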
| 280 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
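

# For instance, get_audio_spectrogram_transformer_config("ast-finetuned-audioset-10-10-0.4593")
# should return a config with 527 AudioSet labels and the default 10/10 patch strides
# (an informal illustration, not part of the original script).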
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused QKV projection into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into our Audio Spectrogram Transformer structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__magic_name__ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__magic_name__ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__magic_name__ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__magic_name__ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__magic_name__ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__magic_name__ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__magic_name__ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__magic_name__ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3], A_, atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 716 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # Placeholder that raises a helpful error when `note_seq` is not installed.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 76 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer that can map one placeholder token to several learned vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text
    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
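

# A minimal usage sketch (illustrative; the base CLIP checkpoint choice is an
# assumption): register a 4-vector placeholder token, then tokenize a prompt.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)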
| 569 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 553 | 0 |
def actual_power(a: int, b: int) -> int:
    """Exponentiation by squaring for a non-negative exponent b."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Handles negative exponents on top of actual_power."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
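

# For example, power(-2, -3) evaluates 1 / (-2)**3:
#   >>> power(-2, -3)
#   -0.125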
if __name__ == "__main__":
print(power(-2, -3))
| 703 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 186 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
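

# A short usage sketch (illustrative; the tool downloads the BLIP checkpoint on
# first use, and "photo.jpg" is a placeholder path):
#
#   from PIL import Image
#
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))
#   print(caption)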
| 616 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowercase = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=A_ , )
| 616 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowerCAmelCase__ : str = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
lowerCAmelCase__ : Union[str, Any] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
lowerCAmelCase__ : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='http://www.cs.umd.edu/~snover/tercom/' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' ,id='sequence' ) ,id='references' ),
} ) ,codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] ,reference_urls=[
'https://github.com/jhclark/tercom',
] ,)
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 706 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 632 | 0 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily pretend the feature size is the number of mel bins so the
                # feature extractor pads spectrogram targets correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
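

# A brief usage sketch (illustrative; "microsoft/speecht5_tts" is the public TTS
# checkpoint that ships with a bundled processor):
#
#   from transformers import SpeechT5Processor
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="Hello world", return_tensors="pt")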
| 559 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 624 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
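

# A quick sanity check on the classic example array (illustrative):
#   max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) -> (3, 6, 6)
# i.e. the slice [4, -1, 2, 1] with sum 6.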
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the maximum-sum subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repo, mimicking fsspec."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 660 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a (possibly quantized) tensor of a module to a given device, quantizing it on the fly if needed."""
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names | 174 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic heap with position tracking; pass a key function to order items by score."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index of a valid parent as per the desired ordering among the
        given index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and keep its position in sync.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item pair (item, calculated value) from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top item pair from the heap and removes it as well, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
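# Illustrative usage sketch (editor's addition, not part of the original
# module): exercises the decrease-key style API above. With the default
# identity key the heap behaves as a min-heap over item values.
def _demo_heap_usage() -> None:
    heap = Heap()
    heap.insert_item(1, 30)
    heap.insert_item(2, 10)
    assert heap.get_top() == [2, 10]
    heap.update_item(1, 5)  # item 1 now outranks item 2
    assert heap.extract_top() == [1, 5]
    heap.delete_item(2)
    assert heap.get_top() is None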
def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 174 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    r"""Configuration class for VisualBERT models."""

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
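# Illustrative usage (editor's addition): the defaults above correspond to the
# uclanlp/visualbert-vqa-coco-pre architecture; here only the visual embedding
# width is overridden.
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   assert config.model_type == "visual_bert"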
| 720 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
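# Illustrative effect (editor's addition): with the lazy module registered,
# `from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseModel`
# resolves the name lazily, deferring the torch-heavy import to first use.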
| 368 | 0 |
"""simple docstring"""
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """Converts a T5X checkpoint into a Flax transformers model and saves it."""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    # `tax_model` holds the raw T5X parameter tree used throughout this function.
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_a : Optional[Any] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_a : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : List[str] = "TransientGlobalSelfAttention"
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
_a : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_a : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_a : Dict = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_a : str = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_a : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_a : Dict = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_a : Any = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_a : int = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_a : int = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_a : Tuple = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_a : Optional[Any] = flax_model.params["encoder"]["block"][str(__lowerCamelCase )]["layer"]
_a : Tuple = tax_attention_key
_a : str = tax_attention_out
_a : Dict = tax_attention_query
_a : Union[str, Any] = tax_attention_value
_a : Tuple = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : str = tax_global_layer_norm
if split_mlp_wi:
_a : Optional[int] = tax_mlp_wi_a
_a : Union[str, Any] = tax_mlp_wi_a
else:
_a : List[str] = tax_mlp_wi
_a : Union[str, Any] = tax_mlp_wo
_a : Optional[int] = tax_mlp_layer_norm
_a : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
_a : Any = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_a : List[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Optional[Any] = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_a : Any = tax_encoder_global_rel_embedding
# Assigning
_a : int = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_a : List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
_a : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_a : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_a : int = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_a : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_a : Dict = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_a : Any = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_a : str = tax_enc_dec_attention_module["key"]["kernel"]
_a : Optional[Any] = tax_enc_dec_attention_module["out"]["kernel"]
_a : List[Any] = tax_enc_dec_attention_module["query"]["kernel"]
_a : int = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_a : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_a : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_a : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_a : List[str] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_a : Tuple = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_a : str = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
_a : int = tax_attention_key
_a : int = tax_attention_out
_a : int = tax_attention_query
_a : Optional[int] = tax_attention_value
_a : Optional[Any] = tax_pre_attention_layer_norm
_a : List[Any] = tax_enc_dec_attention_key
_a : Union[str, Any] = tax_enc_dec_attention_out
_a : int = tax_enc_dec_attention_query
_a : int = tax_enc_dec_attention_value
_a : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_a : str = tax_mlp_wi_a
_a : List[Any] = tax_mlp_wi_a
else:
_a : List[Any] = tax_mlp_wi
_a : Optional[Any] = tax_mlp_wo
_a : Optional[int] = txa_mlp_layer_norm
_a : Tuple = flax_model_decoder_layer_block
# Decoder Normalization
_a : Dict = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
_a : Union[str, Any] = txa_decoder_norm
# Only for layer 0:
_a : Optional[int] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_a : Optional[Any] = tax_decoder_rel_embedding
# Token Embeddings
_a : Any = tax_model["target"]["token_embedder"]["embedding"]
_a : int = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_a : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
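# Illustrative CLI invocation (editor's addition); all paths are placeholders:
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-flax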
| 389 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_KWARGS_DESCRIPTION = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset, with LaTeX canonicalization."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
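# Illustrative behaviour (editor's addition): `math_equivalence.is_equiv`
# canonicalizes LaTeX before comparing, so "1/2" and "\\frac{1}{2}" score as
# the same answer, exactly the example in the module docstring above.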
| 204 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
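# Illustrative usage (editor's addition): the deprecated alias still works but
# warns on instantiation; prefer the image processor directly.
#   extractor = PoolFormerFeatureExtractor()   # emits FutureWarning
#   processor = PoolFormerImageProcessor()     # preferred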
| 712 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
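# Illustrative standalone sketch (editor's addition, not part of the test
# module), mirroring the slow test above; checkpoint ids are the public Hub
# repositories already referenced in the test:
#   pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   decoder = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#   emb, neg_emb = pipe_prior("A red cartoon frog, 4k").to_tuple()
#   frog = decoder(image=init_image, image_embeds=emb, negative_image_embeds=neg_emb, strength=0.2).images[0]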
| 220 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # The shared positional embedding is duplicated under the model-level key.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=10_24,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=12_80,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[4_00, 6_50]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712_6030_9219_3604

    input_boxes = ((75, 2_75, 17_25, 8_50),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686_0156_0592_6514

    # Test with 2 points and 1 image.
    input_points = [[[4_00, 6_50], [8_00, 6_50]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
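# Illustrative CLI invocation (editor's addition); the script filename is an
# assumption, and the original checkpoint is fetched from `--model_hub_id`:
#   python convert_sam_checkpoint.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge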
| 507 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __lowercase (_SCREAMING_SNAKE_CASE :List[str] ):
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def __lowercase (_SCREAMING_SNAKE_CASE :int ):
class a__ :
def __init__(self : Optional[int], __UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = metric_id
class a__ :
__magic_name__ : List[Any] = [MetricMock(_lowercase ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def lowercase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def __lowercase (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :int ):
if "tmp_path" in args:
SCREAMING_SNAKE_CASE : List[Any] = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(_SCREAMING_SNAKE_CASE , match='''https://huggingface.co/docs/evaluate''' ):
func(*_SCREAMING_SNAKE_CASE )
| 507 | 1 |
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solves the maze and prints the path from the top-left to the bottom-right corner."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search step: marks cell (i, j) on the path if it leads to the goal."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
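def _demo_solve_maze() -> None:
    # Editor's addition (not in the original module): a tiny worked example.
    # 0 marks an open cell and 1 marks a wall; the solver travels from the
    # top-left corner to the bottom-right corner.
    maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    assert solve_maze(maze)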
if __name__ == "__main__":
import doctest
doctest.testmod() | 704 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self) -> None:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 310 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Constructs a fast DPR context-encoder tokenizer (backed by BertTokenizerFast)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Constructs a fast DPR question-encoder tokenizer (backed by BertTokenizerFast)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
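# A short usage sketch of the reader API documented above. The checkpoint name and
# printed field follow the public transformers DPR docs; treat the exact decoded
# spans as illustrative rather than guaranteed output:
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#   print(predicted_spans[0].text)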
| 53 |
import base64


def ascii85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 601 | 0 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] is True if some subset of the first i elements sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
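    # Quick sanity check in addition to the doctests above: in [3, 34, 4, 12, 5, 2]
    # the sum 9 is reachable (4 + 5), while 30 is not.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False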
| 423 |
class Graph:
    """Directed graph stored as an adjacency list, traversed with a recursive DFS."""

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 423 | 1 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
__snake_case = 100
__snake_case = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of prime products over all prime partitions of the input."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
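    # Worked check: 10 has exactly five prime partitions
    # (7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2), and each partition maps to a
    # distinct product of primes, so the returned set has five elements.
    assert len(partition(10)) == 5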
| 178 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
MODEL_MODES = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="base" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase__ )
snake_case : Tuple = 0
snake_case : List[str] = Path(self.hparams.output_dir )
snake_case : Any = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
snake_case : int = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase__ , **UpperCamelCase__ , )
else:
snake_case : PretrainedConfig = config
snake_case : Any = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase__ , UpperCamelCase__ ):
assert hasattr(self.config , UpperCamelCase__ ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , UpperCamelCase__ , getattr(self.hparams , UpperCamelCase__ ) )
if tokenizer is None:
snake_case : Dict = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase__ , )
else:
snake_case : PreTrainedTokenizer = tokenizer
snake_case : List[str] = MODEL_MODES[mode]
if model is None:
snake_case : Union[str, Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase__ , )
else:
snake_case : List[Any] = model
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.model_type.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Dict = arg_to_scheduler[self.hparams.lr_scheduler]
snake_case : Any = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
snake_case : List[str] = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = self.model
snake_case : Tuple = ["bias", "LayerNorm.weight"]
snake_case : Any = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ],  # check these named parameters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
snake_case : Optional[int] = Adafactor(
UpperCamelCase__ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase__ , relative_step=UpperCamelCase__ )
else:
snake_case : Optional[Any] = AdamW(
UpperCamelCase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
snake_case : Any = optimizer
snake_case : List[Any] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.validation_step(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.validation_end(UpperCamelCase__ )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : List[str] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
snake_case : List[Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCamelCase ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if stage == "test":
snake_case : Dict = len(self.test_dataloader().dataset )
else:
snake_case : str = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
snake_case : Dict = len(self.train_dataloader().dataset )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> int:
'''simple docstring'''
raise NotImplementedError("You must implement this for your task" )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.train_loader
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=UpperCamelCase__ )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
UpperCamelCase__ , list(filter(UpperCamelCase__ , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCamelCase ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
snake_case : str = self.output_dir.joinpath("best_tfmr" )
snake_case : int = self.step_count
self.model.save_pretrained(UpperCamelCase__ )
self.tokenizer.save_pretrained(UpperCamelCase__ )
@staticmethod
def lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
parser.add_argument(
"--model_name_or_path" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=UpperCamelCase__ , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(UpperCamelCase__ ).parent / "test_run" / "cache" ) , type=UpperCamelCase__ , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=UpperCamelCase__ , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=UpperCamelCase__ , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=UpperCamelCase__ , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=UpperCamelCase__ , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5e-5 , type=UpperCamelCase__ , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=UpperCamelCase__ , metavar=UpperCamelCase__ , type=UpperCamelCase__ , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCamelCase__ , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1e-8 , type=UpperCamelCase__ , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=UpperCamelCase__ , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=UpperCamelCase__ , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=UpperCamelCase__ )
parser.add_argument("--train_batch_size" , default=32 , type=UpperCamelCase__ )
parser.add_argument("--eval_batch_size" , default=32 , type=UpperCamelCase__ )
parser.add_argument("--adafactor" , action="store_true" )
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
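# Hedged usage sketch of add_generic_args / generic_train above. The task-specific
# subclass (here called MyTaskTransformer) and its argument plumbing are assumptions,
# not part of this file:
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = MyTaskTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskTransformer(args)
#   trainer = generic_train(model, args)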
| 178 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
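# Worked example of the key rewrite above (offset 0):
#   replace_key_with_offset("poolformer.encoder.1.0.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
#   returns "poolformer.encoder.block.1.0.output.conv1.weight"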
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download a standard COCO test image to verify the converted model on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 156 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : int = tmp_path / """cache"""
_UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : Tuple = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = tmp_path / """cache"""
_UpperCAmelCase : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : int = features.copy() if features else default_expected_features
_UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Any = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
_UpperCAmelCase : int = features.copy() if features else default_expected_features
_UpperCAmelCase : Optional[Any] = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Dict = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_UpperCAmelCase : Union[str, Any] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
_UpperCAmelCase : Optional[Any] = features.copy()
_UpperCAmelCase : Any = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Tuple = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Dict = tmp_path / """cache"""
_UpperCAmelCase : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : List[Any] = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , split=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = jsonl_path
elif issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = [jsonl_path]
_UpperCAmelCase : int = tmp_path / """cache"""
_UpperCAmelCase : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : Any = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : List[Any] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ ).read()
_check_json_datasetdict(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = tmp_path / """cache"""
_UpperCAmelCase : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : List[str] = features.copy() if features else default_expected_features
_UpperCAmelCase : int = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Any = JsonDatasetReader({"""train""": jsonl_path} , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_datasetdict(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if split:
_UpperCAmelCase : str = {split: jsonl_path}
else:
_UpperCAmelCase : int = """train"""
_UpperCAmelCase : int = {"""train""": jsonl_path, """test""": jsonl_path}
_UpperCAmelCase : Optional[int] = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : Optional[Any] = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_datasetdict(lowerCAmelCase_ , lowerCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ ).write()
buffer.seek(0 )
_UpperCAmelCase : Optional[int] = load_json_function(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert isinstance(exported_content[0] , lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ , orient=lowerCAmelCase__ ).write()
buffer.seek(0 )
_UpperCAmelCase : str = load_json(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCAmelCase__ ) == 1_0
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase : Optional[int] = load_json_function(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert isinstance(exported_content[0] , lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ , orient=lowerCAmelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase : Optional[Any] = load_json(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCAmelCase__ ) == 1_0
def snake_case_ (self , lowerCAmelCase__ ):
with pytest.raises(lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / F"test.json.{extension}"
_UpperCAmelCase : List[Any] = str(shared_datadir / F"test_file.json.{extension}" )
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , compression=lowerCAmelCase__ ).write()
with fsspec.open(lowerCAmelCase__ , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase : str = f.read()
with fsspec.open(lowerCAmelCase__ , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase : Optional[int] = f.read()
assert exported_content == original_content
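# Hedged round-trip sketch of the reader/writer pair exercised in these tests
# (the Dataset contents are illustrative):
#
#   import io
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   with io.BytesIO() as buffer:
#       JsonDatasetWriter(ds, buffer, lines=True).write()
#       buffer.seek(0)
#       print(buffer.read().decode("utf-8"))  # one JSON object per line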
| 156 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
 | 44 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ChineseCLIPFeatureExtractor"""]
__lowerCAmelCase : List[Any] =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
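# Typical invocations of the subcommands wired up above, via the accelerate CLI
# (subcommand names as registered here; per-command flags omitted):
#
#   accelerate config            # interactive configuration questionnaire
#   accelerate config default    # write a default config file non-interactively
#   accelerate config update     # bring an existing config file up to date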
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    main()
 | 240 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[Any] ) -> Tuple:
__lowerCAmelCase : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase : List[Any] = name.split(""".""" )
__lowerCAmelCase : Any = int(items[0] )
__lowerCAmelCase : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__lowerCAmelCase : Dict = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__lowerCAmelCase : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__lowerCAmelCase : List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
__lowerCAmelCase : Dict = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
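
Every branch of the conv-layer loader follows the same assert-then-assign discipline. Extracted into a stand-alone helper (a sketch; `copy_with_check` is not a transformers API):

```python
import torch

def copy_with_check(dst: torch.Tensor, src: torch.Tensor, full_name: str) -> None:
    """Refuse silently-wrong loads: shapes must match exactly before copying."""
    if dst.shape != src.shape:
        raise ValueError(
            f"{full_name} has size {tuple(src.shape)}, "
            f"but {tuple(dst.shape)} was found."
        )
    with torch.no_grad():
        dst.copy_(src)

copy_with_check(torch.zeros(3), torch.ones(3), "demo.bias")  # ok, shapes match
```
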
def _SCREAMING_SNAKE_CASE ( emb :List[Any] ) -> List[Any]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
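
The helper above rebuilds an output projection from an embedding table. Note that a `(vocab, hidden)` embedding weight already has the shape `nn.Linear(hidden, vocab)` expects, so the tensor can be shared rather than copied; a sketch of that weight-tying variant (`tie_output_projection` is an illustrative name):

```python
import torch.nn as nn

def tie_output_projection(emb: nn.Embedding) -> nn.Linear:
    vocab_size, hidden_size = emb.weight.shape
    proj = nn.Linear(hidden_size, vocab_size, bias=False)
    proj.weight = emb.weight  # share the Parameter: one tensor, two views
    return proj

emb = nn.Embedding(100, 16)
proj = tie_output_projection(emb)
assert proj.weight.data_ptr() == emb.weight.data_ptr()  # same storage
```
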
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any ) -> Dict:
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase : List[Any] = f.readlines()
__lowerCAmelCase : Any = [line.split(""" """ )[0] for line in lines]
__lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
return vocab_dict
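
The vocab builder above assumes the fairseq `dict.txt` format, one `<token> <count>` pair per line, with the four special tokens pinned to ids 0-3. A self-contained sketch of the same construction:

```python
import io

def build_vocab(dict_file) -> dict:
    words = [line.split(" ")[0] for line in dict_file]
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    vocab.update({word: idx for idx, word in enumerate(words, start=4)})
    return vocab

print(build_vocab(io.StringIO("hello 120\nworld 87\n")))
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}
```
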
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int , ) -> List[str]:
__lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase : int = model[0].eval()
# set weights for wav2vec2 encoder
__lowerCAmelCase : int = WavaVecaModel(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
__lowerCAmelCase : Dict = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__lowerCAmelCase : Union[str, Any] = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = False
# add projection layer
__lowerCAmelCase : str = nn.Parameter(projection_layer.weight )
__lowerCAmelCase : str = nn.Parameter(projection_layer.bias )
__lowerCAmelCase : Dict = create_vocab_dict(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , """vocab.json""" ) , """w""" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , """vocab.json""" ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = hf_wavavec.config.to_dict()
__lowerCAmelCase : int = tokenizer.pad_token_id
__lowerCAmelCase : List[str] = tokenizer.bos_token_id
__lowerCAmelCase : Union[str, Any] = tokenizer.eos_token_id
__lowerCAmelCase : Any = """speech_to_text_2"""
__lowerCAmelCase : Tuple = """wav2vec2"""
__lowerCAmelCase : Tuple = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
_UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 240 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _lowerCAmelCase ( snake_case_ ):
def __get__( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
snake_case : Optional[Any] = "__cached_" + self.fget.__name__
snake_case : str = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if cached is None:
snake_case : Union[str, Any] = self.fget(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return cached
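
The descriptor above computes a property once and stashes the result on the instance under a `__cached_*` attribute. On Python 3.8+ the standard library ships the same behavior; a usage sketch:

```python
from functools import cached_property

class Checkpoint:
    @cached_property
    def num_parameters(self) -> int:
        print("computed once")
        return 1_000_000

ckpt = Checkpoint()
ckpt.num_parameters
ckpt.num_parameters  # "computed once" is printed a single time
```
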
def __lowerCAmelCase ( lowercase : Dict ) -> Dict:
"""simple docstring"""
snake_case : List[Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'invalid truth value {val!r}' )
def __lowerCAmelCase ( lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
if is_torch_fx_proxy(lowercase ):
return True
if is_torch_available():
import torch
if isinstance(lowercase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowercase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowercase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowercase , np.ndarray )
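
The predicate above only imports each optional backend after confirming it is available, so NumPy remains the single hard dependency. An equivalent guard can be written with `importlib.util.find_spec`; a self-contained sketch:

```python
import importlib.util
import numpy as np

def looks_like_tensor(obj) -> bool:
    # Only touch torch if it is actually installed.
    if importlib.util.find_spec("torch") is not None:
        import torch
        if isinstance(obj, torch.Tensor):
            return True
    return isinstance(obj, np.ndarray)

print(looks_like_tensor(np.arange(3)))  # True, even without torch installed
```
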
def __lowerCAmelCase ( lowercase : int ) -> Any:
"""simple docstring"""
return isinstance(lowercase , np.ndarray )
def __lowerCAmelCase ( lowercase : str ) -> int:
"""simple docstring"""
return _is_numpy(lowercase )
def __lowerCAmelCase ( lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
import torch
return isinstance(lowercase , torch.Tensor )
def __lowerCAmelCase ( lowercase : Dict ) -> Dict:
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowercase )
def __lowerCAmelCase ( lowercase : str ) -> str:
"""simple docstring"""
import torch
return isinstance(lowercase , torch.device )
def __lowerCAmelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowercase )
def __lowerCAmelCase ( lowercase : str ) -> Optional[Any]:
"""simple docstring"""
import torch
if isinstance(lowercase , lowercase ):
if hasattr(lowercase , lowercase ):
snake_case : Any = getattr(lowercase , lowercase )
else:
return False
return isinstance(lowercase , torch.dtype )
def __lowerCAmelCase ( lowercase : Dict ) -> Optional[int]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowercase )
def __lowerCAmelCase ( lowercase : Dict ) -> Any:
"""simple docstring"""
import tensorflow as tf
return isinstance(lowercase , tf.Tensor )
def __lowerCAmelCase ( lowercase : List[str] ) -> int:
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowercase )
def __lowerCAmelCase ( lowercase : List[str] ) -> Dict:
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowercase , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(lowercase )
return type(lowercase ) == tf.Tensor
def __lowerCAmelCase ( lowercase : int ) -> str:
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowercase )
def __lowerCAmelCase ( lowercase : Any ) -> Optional[Any]:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowercase , jnp.ndarray )
def __lowerCAmelCase ( lowercase : List[str] ) -> Optional[int]:
"""simple docstring"""
return False if not is_flax_available() else _is_jax(lowercase )
def __lowerCAmelCase ( lowercase : Any ) -> Tuple:
"""simple docstring"""
if isinstance(lowercase , (dict, UserDict) ):
return {k: to_py_obj(lowercase ) for k, v in obj.items()}
elif isinstance(lowercase , (list, tuple) ):
return [to_py_obj(lowercase ) for o in obj]
elif is_tf_tensor(lowercase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowercase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowercase ):
return np.asarray(lowercase ).tolist()
elif isinstance(lowercase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
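
The converter above recurses through dicts, lists, and tuples before handing leaves to the per-framework `.tolist()` paths. A NumPy-only analogue that runs on its own:

```python
import numpy as np

def to_py(obj):
    if isinstance(obj, dict):
        return {k: to_py(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_py(o) for o in obj]
    if isinstance(obj, (np.ndarray, np.number)):
        return obj.tolist()  # also works on 0-d arrays and scalars
    return obj

print(to_py({"ids": np.arange(3), "score": np.float32(0.5)}))
# {'ids': [0, 1, 2], 'score': 0.5}
```
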
def __lowerCAmelCase ( lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if isinstance(lowercase , (dict, UserDict) ):
return {k: to_numpy(lowercase ) for k, v in obj.items()}
elif isinstance(lowercase , (list, tuple) ):
return np.array(lowercase )
elif is_tf_tensor(lowercase ):
return obj.numpy()
elif is_torch_tensor(lowercase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowercase ):
return np.asarray(lowercase )
else:
return obj
class _lowerCAmelCase ( snake_case_ ):
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Dict = fields(self )
# Safety and consistency checks
if not len(UpperCamelCase__ ):
raise ValueError(F'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
snake_case : List[Any] = getattr(self , class_fields[0].name )
snake_case : int = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Any = first_field.items()
snake_case : List[str] = True
else:
try:
snake_case : int = iter(UpperCamelCase__ )
snake_case : Optional[int] = True
except TypeError:
snake_case : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCamelCase__ ):
if (
not isinstance(UpperCamelCase__ , (list, tuple) )
or not len(UpperCamelCase__ ) == 2
or not isinstance(element[0] , UpperCamelCase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
snake_case : int = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
snake_case : List[str] = element[1]
elif first_field is not None:
snake_case : List[Any] = first_field
else:
for field in class_fields:
snake_case : Tuple = getattr(self , field.name )
if v is not None:
snake_case : Optional[int] = v
def __delitem__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Optional[int] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def __setitem__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Tuple[Any]:
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
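
The container above keeps attribute access and key access in sync and skips `None` fields, which is what lets callers write `outputs.logits` or `outputs["logits"]` interchangeably. A toy analogue of just that mechanic (`SimpleOutput` is illustrative, not the real class):

```python
from collections import OrderedDict

class SimpleOutput(OrderedDict):
    def __setattr__(self, name, value):
        if value is not None:
            OrderedDict.__setitem__(self, name, value)
        super().__setattr__(name, value)

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())

out = SimpleOutput()
out.logits = [0.1, 0.9]
out.hidden = None                      # None fields never become keys
print(out["logits"], out.to_tuple())   # [0.1, 0.9] ([0.1, 0.9],)
```
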
class _lowerCAmelCase ( snake_case_ , snake_case_ ):
@classmethod
def lowerCamelCase ( cls , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
raise ValueError(
            F'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}' )
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Optional[Any] = '''longest'''
__UpperCAmelCase : Tuple = '''max_length'''
__UpperCAmelCase : Optional[Any] = '''do_not_pad'''
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Tuple = '''pt'''
__UpperCAmelCase : int = '''tf'''
__UpperCAmelCase : List[str] = '''np'''
__UpperCAmelCase : Union[str, Any] = '''jax'''
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : Tuple = context_managers
snake_case : Union[str, Any] = ExitStack()
def __enter__( self ) -> List[Any]:
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(UpperCamelCase__ )
def __exit__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
self.stack.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
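
The class above is a thin wrapper over `contextlib.ExitStack`, which is what lets a variable-length list of context managers be entered and exited as one. A direct usage sketch:

```python
import tempfile
from contextlib import ExitStack

with ExitStack() as stack:
    dirs = [stack.enter_context(tempfile.TemporaryDirectory()) for _ in range(3)]
    print(len(dirs))  # 3 temp dirs, all cleaned up together when the block exits
```
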
def __lowerCAmelCase ( lowercase : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case : str = infer_framework(lowercase )
if framework == "tf":
snake_case : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case : Optional[Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __lowerCAmelCase ( lowercase : int ) -> Optional[int]:
"""simple docstring"""
snake_case : Tuple = model_class.__name__
snake_case : Dict = infer_framework(lowercase )
if framework == "tf":
snake_case : str = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case : Optional[int] = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case : List[str] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __lowerCAmelCase ( lowercase : MutableMapping , lowercase : str = "" , lowercase : str = "." ) -> Tuple:
"""simple docstring"""
def _flatten_dict(lowercase : List[Any] , lowercase : str="" , lowercase : Tuple="." ):
for k, v in d.items():
snake_case : Any = str(lowercase ) + delimiter + str(lowercase ) if parent_key else k
if v and isinstance(lowercase , lowercase ):
yield from flatten_dict(lowercase , lowercase , delimiter=lowercase ).items()
else:
yield key, v
return dict(_flatten_dict(lowercase , lowercase , lowercase ) )
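
A quick check of the flattening behavior above, re-implemented minimally so the snippet is self-contained:

```python
def flatten(d, parent_key="", delimiter="."):
    out = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if v and isinstance(v, dict):
            out.update(flatten(v, key, delimiter))
        else:
            out[key] = v
    return out

print(flatten({"model": {"encoder": {"layers": 12}, "dropout": 0.1}}))
# {'model.encoder.layers': 12, 'model.dropout': 0.1}
```
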
@contextmanager
def __lowerCAmelCase ( lowercase : str , lowercase : bool = False ) -> Union[str, Any]:
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __lowerCAmelCase ( lowercase : List[Any] , lowercase : Any=None ) -> Optional[int]:
"""simple docstring"""
if is_numpy_array(lowercase ):
return np.transpose(lowercase , axes=lowercase )
elif is_torch_tensor(lowercase ):
return array.T if axes is None else array.permute(*lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.transpose(lowercase , perm=lowercase )
elif is_jax_tensor(lowercase ):
return jnp.transpose(lowercase , axes=lowercase )
else:
raise ValueError(F'Type not supported for transpose: {type(lowercase )}.' )
def __lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : Any ) -> Union[str, Any]:
"""simple docstring"""
if is_numpy_array(lowercase ):
return np.reshape(lowercase , lowercase )
elif is_torch_tensor(lowercase ):
return array.reshape(*lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.reshape(lowercase , lowercase )
elif is_jax_tensor(lowercase ):
return jnp.reshape(lowercase , lowercase )
else:
raise ValueError(F'Type not supported for reshape: {type(lowercase )}.' )
def __lowerCAmelCase ( lowercase : str , lowercase : List[Any]=None ) -> str:
"""simple docstring"""
if is_numpy_array(lowercase ):
return np.squeeze(lowercase , axis=lowercase )
elif is_torch_tensor(lowercase ):
return array.squeeze() if axis is None else array.squeeze(dim=lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.squeeze(lowercase , axis=lowercase )
elif is_jax_tensor(lowercase ):
return jnp.squeeze(lowercase , axis=lowercase )
else:
raise ValueError(F'Type not supported for squeeze: {type(lowercase )}.' )
def __lowerCAmelCase ( lowercase : Optional[Any] , lowercase : List[Any] ) -> Any:
"""simple docstring"""
if is_numpy_array(lowercase ):
return np.expand_dims(lowercase , lowercase )
elif is_torch_tensor(lowercase ):
return array.unsqueeze(dim=lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.expand_dims(lowercase , axis=lowercase )
elif is_jax_tensor(lowercase ):
return jnp.expand_dims(lowercase , axis=lowercase )
else:
raise ValueError(F'Type not supported for expand_dims: {type(lowercase )}.' )
def __lowerCAmelCase ( lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if is_numpy_array(lowercase ):
return np.size(lowercase )
elif is_torch_tensor(lowercase ):
return array.numel()
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.size(lowercase )
elif is_jax_tensor(lowercase ):
return array.size
else:
raise ValueError(F'Type not supported for expand_dims: {type(lowercase )}.' )
def __lowerCAmelCase ( lowercase : str , lowercase : Any ) -> Tuple:
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(lowercase , (tuple, list) ):
snake_case : int = [F'{repo_id}--{v}' if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
snake_case : str = F'{repo_id}--{value}'
return auto_map
def __lowerCAmelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for base_class in inspect.getmro(lowercase ):
snake_case : Dict = base_class.__module__
snake_case : int = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'Could not infer framework from class {model_class}.' )
| 178 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__snake_case = logging.get_logger(__name__)
class _lowerCAmelCase ( snake_case_ ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 178 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Any = "mobilenet_v2"
def __init__( self : int , snake_case__ : Any=3 , snake_case__ : Optional[int]=2_2_4 , snake_case__ : str=1.0 , snake_case__ : Optional[int]=8 , snake_case__ : str=8 , snake_case__ : Tuple=6 , snake_case__ : Any=3_2 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : Any="relu6" , snake_case__ : int=True , snake_case__ : Any=0.8 , snake_case__ : Dict=0.02 , snake_case__ : int=0.0_01 , snake_case__ : List[str]=2_5_5 , **snake_case__ : List[str] , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
lowercase :str = num_channels
lowercase :Tuple = image_size
lowercase :Dict = depth_multiplier
lowercase :List[str] = depth_divisible_by
lowercase :Tuple = min_depth
lowercase :Tuple = expand_ratio
lowercase :Union[str, Any] = output_stride
lowercase :List[Any] = first_layer_is_expansion
lowercase :Any = finegrained_output
lowercase :int = hidden_act
lowercase :Optional[Any] = tf_padding
lowercase :Optional[Any] = classifier_dropout_prob
lowercase :str = initializer_range
lowercase :int = layer_norm_eps
lowercase :int = semantic_loss_ignore_index
class __magic_name__ ( __UpperCAmelCase ):
__A : Optional[int] = version.parse("1.11" )
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return 1e-4
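
The `depth_multiplier`, `depth_divisible_by`, and `min_depth` fields above feed the standard MobileNet channel-rounding rule: scaled widths are snapped to a hardware-friendly multiple without shrinking any layer by more than about 10%. A common formulation follows; assume it matches the upstream helper in spirit only.

```python
from typing import Optional

def make_divisible(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round away more than ~10% of the width
        new_value += divisor
    return new_value

print(make_divisible(32 * 1.4))  # 48 -- a 1.4x multiplier on 32 channels, snapped to 8
```
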
| 475 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __magic_name__ ( __UpperCAmelCase ):
__A : UNetaDModel
__A : ScoreSdeVeScheduler
def __init__( self : Optional[Any] , snake_case__ : UNetaDModel , snake_case__ : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self : Tuple , snake_case__ : int = 1 , snake_case__ : int = 2_0_0_0 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , **snake_case__ : Optional[Any] , ):
'''simple docstring'''
lowercase :str = self.unet.config.sample_size
lowercase :str = (batch_size, 3, img_size, img_size)
lowercase :Any = self.unet
lowercase :Dict = randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase :int = sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase :Optional[int] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase :str = self.unet(snake_case__ , snake_case__ ).sample
lowercase :Optional[Any] = self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase :Optional[Any] = model(snake_case__ , snake_case__ ).sample
lowercase :Dict = self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase , lowercase :Tuple = output.prev_sample, output.prev_sample_mean
lowercase :str = sample_mean.clamp(0 , 1 )
lowercase :int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase :str = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
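
The `__call__` above is a predictor-corrector sampler: at every noise level it first runs a few corrector (Langevin-style) updates, then one predictor step along the reverse SDE. Here is the control flow isolated, with runnable stand-ins for the model and scheduler:

```python
import torch

def pc_sample(score_fn, step_correct, step_pred, sigmas, sample, n_correct=1):
    for sigma in sigmas:
        for _ in range(n_correct):                                  # corrector
            sample = step_correct(score_fn(sample, sigma), sample)
        sample = step_pred(score_fn(sample, sigma), sigma, sample)  # predictor
    return sample

out = pc_sample(
    score_fn=lambda x, s: -x,                 # toy score model
    step_correct=lambda g, x: x + 0.01 * g,   # toy Langevin update
    step_pred=lambda g, s, x: x + 0.1 * g,    # toy reverse-SDE step
    sigmas=torch.linspace(1.0, 0.1, steps=5),
    sample=torch.randn(4),
)
print(out.shape)  # torch.Size([4])
```
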
| 475 | 1 |
def solution(n: int = 4_00_00_00) -> int:
    """simple docstring"""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 284 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'bit'
UpperCamelCase__ = ['preactivation', 'bottleneck']
UpperCamelCase__ = ['SAME', 'VALID']
def __init__( self , snake_case_=3 , snake_case_=64 , snake_case_=[2_56, 5_12, 10_24, 20_48] , snake_case_=[3, 4, 6, 3] , snake_case_="preactivation" , snake_case_="relu" , snake_case_=None , snake_case_=32 , snake_case_=0.0 , snake_case_=False , snake_case_=32 , snake_case_=1 , snake_case_=None , snake_case_=None , **snake_case_ , ):
super().__init__(**snake_case_ )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
lowercase =global_padding.upper()
else:
raise ValueError(f'Padding strategy {global_padding} not supported' )
lowercase =num_channels
lowercase =embedding_size
lowercase =hidden_sizes
lowercase =depths
lowercase =layer_type
lowercase =hidden_act
lowercase =global_padding
lowercase =num_groups
lowercase =drop_path_rate
lowercase =embedding_dynamic_padding
lowercase =output_stride
lowercase =width_factor
lowercase =['''stem'''] + [f'stage{idx}' for idx in range(1 , len(snake_case_ ) + 1 )]
lowercase , lowercase =get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
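
The last two lines of `__init__` resolve which backbone stages to expose, given names like `stem`, `stage1`, and so on. A simplified sketch of that alignment (the real helper also validates `out_indices`; this version assumes names only):

```python
def align_outputs(stage_names, out_features=None):
    if out_features is None:
        out_features = [stage_names[-1]]  # default: expose only the deepest stage
    out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

print(align_outputs(["stem", "stage1", "stage2", "stage3"]))
# (['stage3'], [3])
```
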
| 716 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __magic_name__ :
UpperCamelCase__ = LEDConfig
UpperCamelCase__ = {}
UpperCamelCase__ = 'gelu'
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=False , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=20 , snake_case_=2 , snake_case_=1 , snake_case_=0 , snake_case_=4 , ):
lowercase =parent
lowercase =batch_size
lowercase =seq_length
lowercase =is_training
lowercase =use_labels
lowercase =vocab_size
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =intermediate_size
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =max_position_embeddings
lowercase =eos_token_id
lowercase =pad_token_id
lowercase =bos_token_id
lowercase =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowercase =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowercase =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _A( self ):
lowercase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase =tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowercase =prepare_led_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
lowercase =tf.concat(
[tf.zeros_like(snake_case_ )[:, :-1], tf.ones_like(snake_case_ )[:, -1:]] , axis=-1 , )
lowercase =global_attention_mask
return config, inputs_dict
def _A( self , snake_case_ , snake_case_ ):
lowercase =TFLEDModel(config=snake_case_ ).get_decoder()
lowercase =inputs_dict['''input_ids''']
lowercase =input_ids[:1, :]
lowercase =inputs_dict['''attention_mask'''][:1, :]
lowercase =1
# first forward pass
lowercase =model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
lowercase , lowercase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase =tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase =model(snake_case_ , attention_mask=snake_case_ )[0]
lowercase =model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase =output_from_no_past[:, -3:, random_slice_idx]
lowercase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1E-3 )
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Any=None , lowercase_ : Any=None , ) -> Optional[int]:
'''simple docstring'''
if attention_mask is None:
lowercase =tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
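
The input-dict builder above derives the attention mask by comparing token ids against the pad id, so padding positions are masked out automatically. The same derivation in NumPy:

```python
import numpy as np

def pad_mask(input_ids: np.ndarray, pad_token_id: int) -> np.ndarray:
    return (input_ids != pad_token_id).astype(np.int8)

print(pad_mask(np.array([[5, 7, 1, 1]]), pad_token_id=1))  # [[1 1 0 0]]
```
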
@require_tf
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self ):
lowercase =TFLEDModelTester(self )
lowercase =ConfigTester(self , config_class=snake_case_ )
def _A( self ):
self.config_tester.run_common_tests()
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =tf.zeros_like(inputs_dict['''attention_mask'''] )
lowercase =2
lowercase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
lowercase =True
lowercase =self.model_tester.seq_length
lowercase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(snake_case_ ):
lowercase =outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(snake_case_ ):
lowercase =[t.numpy() for t in outputs.encoder_attentions]
lowercase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowercase =True
lowercase =False
lowercase =False
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
lowercase =len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowercase =True
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
lowercase =True
lowercase =True
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def _A( self ):
pass
def _A( self ):
# TODO: Head-masking not yet implement
pass
def UpperCamelCase ( lowercase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
return tf.constant(lowercase_ , dtype=tf.intaa )
_UpperCAmelCase : Any = 1e-4
@slow
@require_tf
class __magic_name__ ( unittest.TestCase ):
def _A( self ):
lowercase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
lowercase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ )
lowercase =model(**snake_case_ )[0]
lowercase =(1, 10_24, 7_68)
self.assertEqual(output.shape , snake_case_ )
# change to expected output here
lowercase =tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-3 )
def _A( self ):
lowercase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
lowercase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ )
lowercase =model(**snake_case_ )[0]
lowercase =(1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , snake_case_ )
# change to expected output here
lowercase =tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-3 , rtol=1E-3 )
| 145 | 0 |
# Lint as: python3
import itertools
import os
import re
__A : Tuple = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
__A : Any = re.compile(R'''([a-z\d])([A-Z])''')
__A : Optional[int] = re.compile(R'''(?<!_)_(?!_)''')
__A : Any = re.compile(R'''(_{2,})''')
__A : str = R'''^\w+(\.\w+)*$'''
__A : List[str] = R'''<>:/\|?*'''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : int = _uppercase_uppercase_re.sub(r'\1_\2', _UpperCAmelCase )
lowerCAmelCase : Optional[int] = _lowercase_uppercase_re.sub(r'\1_\2', _UpperCAmelCase )
return name.lower()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = _single_underscore_re.split(_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = [_multiple_underscores_re.split(_UpperCAmelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_UpperCAmelCase ) if n != '' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
if os.path.basename(_UpperCAmelCase ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Dict:
'''simple docstring'''
if os.path.basename(_UpperCAmelCase ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re, _UpperCAmelCase ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(_UpperCAmelCase )}-{split}"
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None ) -> str:
'''simple docstring'''
lowerCAmelCase : Tuple = filename_prefix_for_split(_UpperCAmelCase, _UpperCAmelCase )
if filetype_suffix:
prefix += f".{filetype_suffix}"
lowerCAmelCase : Tuple = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
return f"{filepath}*"
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Dict = filename_prefix_for_split(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Optional[Any] = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
if shard_lengths:
lowerCAmelCase : List[str] = len(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(_UpperCAmelCase )]
if filetype_suffix:
lowerCAmelCase : List[str] = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
lowerCAmelCase : str = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
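
A round-trip check of the two name converters above, re-implemented minimally so the snippet runs on its own (the upstream versions add underscore handling this sketch omits):

```python
import re

def camel_to_snake(name: str) -> str:
    name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
    name = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", name)
    return name.lower()

def snake_to_camel(name: str) -> str:
    return "".join(part.capitalize() for part in name.split("_") if part)

print(camel_to_snake("MyNLPModel"))    # my_nlp_model
print(snake_to_camel("my_nlp_model"))  # MyNlpModel -- not a perfect inverse
```
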
| 343 |
from ...processing_utils import ProcessorMixin
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : str = "SpeechT5FeatureExtractor"
lowerCAmelCase_ : Any = "SpeechT5Tokenizer"
def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[str] = kwargs.pop('audio' , UpperCAmelCase_ )
lowerCAmelCase : str = kwargs.pop('text' , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = kwargs.pop('text_target' , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = kwargs.pop('audio_target' , UpperCAmelCase_ )
lowerCAmelCase : int = kwargs.pop('sampling_rate' , UpperCAmelCase_ )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase : Dict = self.feature_extractor(UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ )
elif text is not None:
lowerCAmelCase : List[Any] = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
else:
lowerCAmelCase : Any = None
if audio_target is not None:
lowerCAmelCase : Tuple = self.feature_extractor(audio_target=UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : int = targets['input_values']
elif text_target is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = targets['input_ids']
else:
lowerCAmelCase : Union[str, Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : Dict = labels
lowerCAmelCase : Any = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase : Tuple = decoder_attention_mask
return inputs
def lowercase__ ( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = kwargs.pop('input_values' , UpperCAmelCase_ )
lowerCAmelCase : List[str] = kwargs.pop('input_ids' , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = kwargs.pop('labels' , UpperCAmelCase_ )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase : List[str] = self.feature_extractor.pad(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
elif input_ids is not None:
lowerCAmelCase : Dict = self.tokenizer.pad(UpperCAmelCase_ , **UpperCAmelCase_ )
else:
lowerCAmelCase : str = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and "input_ids" in labels[0]):
lowerCAmelCase : int = self.tokenizer.pad(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Dict = targets['input_ids']
else:
lowerCAmelCase : Any = self.feature_extractor.feature_size
lowerCAmelCase : str = self.feature_extractor.num_mel_bins
lowerCAmelCase : Optional[int] = self.feature_extractor.pad(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : List[Any] = feature_size_hack
lowerCAmelCase : Tuple = targets['input_values']
else:
lowerCAmelCase : Tuple = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : Union[str, Any] = labels
lowerCAmelCase : List[str] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase : Optional[Any] = decoder_attention_mask
return inputs
def lowercase__ ( self : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowercase__ ( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
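
Both entry points above route on which keyword arguments are present and reject ambiguous combinations up front. That validation pattern, in isolation and simplified to a single pair of inputs:

```python
def route(audio=None, text=None):
    if (audio is None) == (text is None):
        raise ValueError("Pass exactly one of `audio` or `text`.")
    return ("audio", audio) if audio is not None else ("text", text)

print(route(text="hello world"))  # ('text', 'hello world')
```
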
| 343 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _UpperCamelCase:
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : int=13 , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : List[str]=True , _lowerCamelCase : int=True , _lowerCamelCase : Any=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]=99 , _lowerCamelCase : Any=64 , _lowerCamelCase : Dict=32 , _lowerCamelCase : Dict=5 , _lowerCamelCase : int=4 , _lowerCamelCase : Optional[Any]=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : str=0.1 , _lowerCamelCase : Dict=5_12 , _lowerCamelCase : Union[str, Any]=16 , _lowerCamelCase : Dict=2 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Dict=3 , _lowerCamelCase : Tuple=4 , _lowerCamelCase : Optional[Any]=None , ):
_UpperCAmelCase : int = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : Dict = seq_length
_UpperCAmelCase : Optional[Any] = is_training
_UpperCAmelCase : List[str] = use_input_mask
_UpperCAmelCase : int = use_token_type_ids
_UpperCAmelCase : Dict = use_labels
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Dict = embedding_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Any = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : List[Any] = type_vocab_size
_UpperCAmelCase : Optional[int] = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Dict = num_labels
_UpperCAmelCase : List[str] = num_choices
_UpperCAmelCase : Optional[int] = scope
def a__ ( self : str ):
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
_UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : str = None
if self.use_token_type_ids:
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : List[str] = None
if self.use_labels:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : int ):
_UpperCAmelCase : int = MegatronBertModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
_UpperCAmelCase : int = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] ):
_UpperCAmelCase : str = MegatronBertForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple ):
_UpperCAmelCase : Union[str, Any] = MegatronBertForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] ):
_UpperCAmelCase : int = MegatronBertForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self : Dict , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] ):
_UpperCAmelCase : List[str] = MegatronBertForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : int ):
_UpperCAmelCase : List[Any] = MegatronBertForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Optional[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Any ):
_UpperCAmelCase : str = self.num_labels
_UpperCAmelCase : Tuple = MegatronBertForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
_UpperCAmelCase : Optional[int] = self.num_labels
_UpperCAmelCase : Dict = MegatronBertForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def a__ ( self : Tuple , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
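        # The multiple-choice head scores each candidate separately, so every input
        # is expanded to shape (batch_size, num_choices, seq_length) before the call.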
        multiple_choice_input_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_input_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ):
_UpperCAmelCase : Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__A: Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__A: Optional[int] = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A: Tuple = True
# test_resize_embeddings = False
__A: Tuple = False
    def a__ ( self : Tuple , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def a__ ( self : Tuple ):
_UpperCAmelCase : str = MegatronBertModelTester(self )
_UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def a__ ( self : Dict ):
self.config_tester.run_common_tests()
def a__ ( self : str ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCamelCase )
def a__ ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCamelCase )
def a__ ( self : Tuple ):
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCamelCase )
def a__ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCamelCase )
def a__ ( self : Any ):
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCamelCase )
def a__ ( self : Dict ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCamelCase )
def a__ ( self : Optional[int] ):
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCamelCase )
def a__ ( self : Any ):
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCamelCase )
def _long_tensor( tok_lst ):
    """simple docstring"""
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
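# Tolerance used below when comparing fp16 model outputs against the reference logits.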
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def a__ ( self : Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
_UpperCAmelCase : Optional[Any] = os.path.join(os.environ["MYDIR"] , _lowerCamelCase )
_UpperCAmelCase : str = MegatronBertModel.from_pretrained(_lowerCamelCase )
model.to(_lowerCamelCase )
model.half()
_UpperCAmelCase : Dict = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(_lowerCamelCase )[0]
_UpperCAmelCase : int = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , _lowerCamelCase )
_UpperCAmelCase : Dict = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
_UpperCAmelCase : str = output[0, ii, jj]
_UpperCAmelCase : Optional[int] = expected[3 * ii + jj]
_UpperCAmelCase : int = "ii={} jj={} a={} b={}".format(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(math.isclose(_lowerCamelCase , _lowerCamelCase , rel_tol=_lowerCamelCase , abs_tol=_lowerCamelCase ) , msg=_lowerCamelCase )
| 328 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
__A: Optional[Any] = """microsoft/speecht5_tts"""
__A: Tuple = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__A: Any = """text_reader"""
__A: Optional[Any] = SpeechTaProcessor
__A: int = SpeechTaForTextToSpeech
__A: Tuple = SpeechTaHifiGan
__A: Optional[Any] = ["""text"""]
__A: int = ["""audio"""]
def a__ ( self : List[str] ):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
super().setup()
    def a__ ( self : Any , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="pt" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
            speaker_embeddings = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def a__ ( self : Union[str, Any] , _lowerCamelCase : List[str] ):
with torch.no_grad():
return self.model.generate_speech(**_lowerCamelCase )
def a__ ( self : int , _lowerCamelCase : str ):
with torch.no_grad():
return self.post_processor(_lowerCamelCase ).cpu().detach()
| 328 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def a__ ( iterations : int ):
    '''simple docstring'''
    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
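# Monte Carlo integration: for U ~ Uniform(a, b), (b - a) * E[f(U)] equals the
# integral of f over [a, b], so averaging f at random points estimates the area.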
def area_under_curve_estimator( iterations : int , function_to_integrate : Callable[[float], float] , min_value : float = 0.0 , max_value : float = 1.0 , ):
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def a__ ( iterations : int , min_value : float = 0.0 , max_value : float = 1.0 ):
    '''simple docstring'''
    def identity_function(x: float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("******************" )
def a__ ( iterations : int ):
    '''simple docstring'''
    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50 # max width of layer names
qname_width = 70 # max width of quantizer names
def add_arguments( parser ):
    '''simple docstring'''
    group = parser.add_argument_group("quant_trainer arguments" )
    group.add_argument("--wprec" , type=int , default=8 , help="weight precision" )
    group.add_argument("--aprec" , type=int , default=8 , help="activation precision" )
    group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
    group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
    group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
    group.add_argument("--quant-disable-keyword" , type=str , nargs="+" , help="disable quantizers by keyword" )
    group.add_argument("--quant-disable-layer-module" , type=str , help="disable quantizers by keyword under layer." )
    group.add_argument("--quant-enable-layer-module" , type=str , help="enable quantizers by keyword under layer" )
    group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
    group.add_argument("--percentile" , default=None , type=float , help="percentile for PercentileCalibrator" )
    group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
    group.add_argument("--clip-gelu" , metavar="N" , type=float , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def set_default_quantizers( args ):
    '''simple docstring'''
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator" )
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    '''simple docstring'''
    logger.info("Configuring Model for Quantization" )
    logger.info(f'using quantization package {pytorch_quantization.__file__}' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["embeddings"] , which="weight" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration( model ):
'''simple docstring'''
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def finish_calibration( model , args ):
'''simple docstring'''
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
'''simple docstring'''
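    # Per the --fuse-qkv option above, Q, K and V are forced onto one shared
    # quantization scale by collapsing their three amax values to the common maximum.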
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , "_amax" ):
                print(" WARNING: NO AMAX BUFFER" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f'          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def expand_amax( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def recalibrate_weights( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=2_5 , line_width=1_8_0 , ignore=None ):
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , "weight" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , "_input_quantizer" , None )
        weight_q = getattr(mod , "_weight_quantizer" , None )
        if not hasattr(mod , "weight" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        msg = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(msg ) <= line_width:
            logger.info(msg )
        else:
            logger.info(f'{name:{name_width}} {act_str}' )
            logger.info(f'{" ":{name_width}} {wgt_str}' )
def print_quant_summary( model ):
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def set_quantizer( name , mod , quantizer , k , v ):
    '''simple docstring'''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'{name} has no {quantizer}' )
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Tuple="both" , **snake_case : Union[str, Any] ):
'''simple docstring'''
snake_case_ = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(snake_case , snake_case , "_input_quantizer" , snake_case , snake_case )
if which in ["weight", "both"]:
set_quantizer(snake_case , snake_case , "_weight_quantizer" , snake_case , snake_case )
logger.info(snake_case )
def set_quantizer_by_name( model , names , **kwargs ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod , k , v )
                    logger.info(s )
| 400 | 0 |
'''simple docstring'''
from __future__ import annotations
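# Bitonic sort convention: direction == 1 sorts a slice ascending, direction == 0
# descending; the classic algorithm assumes the input length is a power of two.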
def comp_and_swap( array: list[int] , index1: int , index2: int , direction: int ) -> None:
    """simple docstring"""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2] , array[index1]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int ) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int ) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 712 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , module : nn.Module , rank : int ):
        '''simple docstring'''
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
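        # LoRA-style adapter: the zero-initialized up-projection means the adapter's
        # initial output is zero, so training starts from the frozen module's behavior.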
    def forward( self , x , *args , **kwargs ):
        '''simple docstring'''
        return self.module(x , *args , **kwargs ) + self.adapter(x )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    model_name = 'bigscience/bloom-1b7'
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 687 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = "microsoft/speecht5_tts"
_snake_case = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_snake_case = "text_reader"
_snake_case = SpeechTaProcessor
_snake_case = SpeechTaForTextToSpeech
_snake_case = SpeechTaHifiGan
_snake_case = ["text"]
_snake_case = ["audio"]
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
super().setup()
    def UpperCAmelCase ( self , text , speaker_embeddings=None ):
        '''simple docstring'''
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(lowerCamelCase__ ).cpu().detach()
| 212 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman( preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
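# Task-to-metric mapping used below: CoLA -> Matthews correlation, STS-B ->
# Pearson/Spearman, MRPC/QQP -> accuracy + F1, remaining GLUE tasks -> accuracy.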
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def A ( self : List[Any] , predictions , references ):
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 0 |
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Any ="""microsoft/speecht5_tts"""
UpperCamelCase__ : List[Any] =(
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
UpperCamelCase__ : Tuple ="""text_reader"""
UpperCamelCase__ : Optional[Any] =SpeechTaProcessor
UpperCamelCase__ : List[str] =SpeechTaForTextToSpeech
UpperCamelCase__ : int =SpeechTaHifiGan
UpperCamelCase__ : Union[str, Any] =["""text"""]
UpperCamelCase__ : str =["""audio"""]
def __lowercase ( self ):
"""simple docstring"""
if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
super().setup()
    def __lowercase ( self , text , speaker_embeddings=None ):
        """simple docstring"""
        inputs = self.pre_processor(text=text , return_tensors='pt' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector'] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def __lowercase ( self , lowerCamelCase__ ):
        """simple docstring"""
        with torch.no_grad():
            return self.model.generate_speech(**lowerCamelCase__ )
    def __lowercase ( self , lowerCamelCase__ ):
        """simple docstring"""
        with torch.no_grad():
            return self.post_processor(lowerCamelCase__ ).cpu().detach()
| 714 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ :Optional[int] = logging.get_logger()
@dataclass
class __A :
"""simple docstring"""
UpperCamelCase__ : nn.Module
UpperCamelCase__ : List[nn.Module] =field(default_factory=a )
UpperCamelCase__ : list =field(default_factory=a )
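    # Registers forward hooks and records every leaf module (no children, or a
    # convolution / batch-norm layer) seen during a single forward pass.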
    def _forward_hook( self , m , inputs , outputs ):
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self , lowerCamelCase__ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def __lowercase ( self ):
"""simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A :
"""simple docstring"""
UpperCamelCase__ : nn.Module
UpperCamelCase__ : nn.Module
UpperCamelCase__ : int =0
UpperCamelCase__ : List =field(default_factory=a )
UpperCamelCase__ : List =field(default_factory=a )
    def __call__( self , x ):
        """simple docstring"""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ):
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
A_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
A_ :Optional[int] = parser.parse_args()
A_ :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 154 | 0 |
import math
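# Malus's law: a polarizer transmits I = I0 * cos^2(theta) of the incoming intensity.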
def _lowerCamelCase ( initial_intensity , angle ):
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 203 |
from __future__ import annotations
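# Slowsort ("multiply and surrender"): recursively sort each half, move the larger
# of the two middle/end elements to the end, then re-sort all but the last item.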
def slowsort( sequence , start = None , end = None ):
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 203 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
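# Resonant frequency of an ideal LC circuit: f = 1 / (2 * pi * sqrt(L * C)).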
def lowercase (inductance : float , capacitance : float ) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
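# Project Euler 75: Euclid's formula with coprime m > n of opposite parity generates
# every primitive right triangle (m^2 - n^2, 2mn, m^2 + n^2), whose perimeter is
# 2m(m + n); multiples of each primitive perimeter are tallied, and perimeters that
# occur exactly once are counted.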
def solution( limit : int = 1_50_00_00 ) -> int:
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 327 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase: Tuple =logging.get_logger(__name__)
lowerCAmelCase: int ={
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class lowerCamelCase__ ( lowerCAmelCase__ ):
__UpperCAmelCase = "rwkv"
__UpperCAmelCase = {"max_position_embeddings": "context_length"}
def __init__( self , snake_case=5_0_2_7_7 , snake_case=1_0_2_4 , snake_case=4_0_9_6 , snake_case=3_2 , snake_case=None , snake_case=None , snake_case=1E-5 , snake_case=0 , snake_case=0 , snake_case=6 , snake_case=False , snake_case=True , **snake_case , ) -> str:
"""simple docstring"""
lowercase : Tuple = vocab_size
lowercase : Optional[int] = context_length
lowercase : List[str] = hidden_size
lowercase : Any = num_hidden_layers
lowercase : List[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase : List[str] = intermediate_size if intermediate_size is not None else 4 * hidden_size
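        # Defaults follow the RWKV convention: the attention width equals the model
        # width, and the channel-mix (feed-forward) width is four times the hidden size.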
lowercase : Tuple = layer_norm_epsilon
lowercase : Any = rescale_every
lowercase : int = use_cache
lowercase : List[Any] = bos_token_id
lowercase : List[str] = eos_token_id
super().__init__(
tie_word_embeddings=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
| 607 |
'''simple docstring'''
A = 9.80_665
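# Archimedes' principle: buoyant force = fluid density * gravity * displaced volume.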
def SCREAMING_SNAKE_CASE ( fluid_density : float , volume : float , gravity : float = g) -> float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('Impossible fluid density')
if volume < 0:
raise ValueError('Impossible Object volume')
if gravity <= 0:
raise ValueError('Impossible Gravity')
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
    doctest.testmod()
| 125 | 0 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Prepare an entangled (GHZ-style) state on `qubits` qubits and return measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
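# Editor's note: for the entangled state prepared above, counts concentrate on the
# all-zeros and all-ones bitstrings, e.g. roughly {'000': 500, '111': 500} for
# quantum_entanglement(3) with 1000 shots.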
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
| 713 |
"""simple docstring"""
import operator as op
# NOTE (editor): the original identifiers were destroyed by obfuscation; the names
# below follow accelerate.utils.constants and are reconstructed from the values.
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# NOTE (editor): the identifier of the next list could not be recovered; the name
# below is a placeholder, not accelerate's own constant name.
XPU_CAPABLE_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
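
# Minimal usage sketch (editor's addition): STR_OPERATION_TO_FUNC is typically used
# to compare version strings. `compare_versions` below is illustrative, not the
# library's own helper.
from packaging.version import parse


def compare_versions(v1: str, operation: str, v2: str) -> bool:
    # e.g. compare_versions("2.0.1", ">=", FSDP_PYTORCH_VERSION) -> True
    return STR_OPERATION_TO_FUNC[operation](parse(v1), parse(v2))
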
| 309 | 0 |
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
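
# Editor's note: instantiating the deprecated class still works but emits a
# FutureWarning, e.g.:
# feature_extractor = PoolFormerFeatureExtractor()  # behaves like PoolFormerImageProcessor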
| 223 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"  # identifier reconstructed; the original name was lost in obfuscation

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : Optional[Any] = [GPTaTokenizer.from_pretrained(lowerCamelCase_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCamelCase__ : Any = [TFGPTaTokenizer.from_pretrained(lowerCamelCase_ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCamelCase__ : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
lowerCamelCase__ : Optional[int] = list(zip(self.test_sentences, self.test_sentences[::-1] ) )
def a__ (self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowerCamelCase__ : Union[str, Any] = tokenizer([test_inputs], return_tensors='tf' )
lowerCamelCase__ : List[str] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCamelCase__ : Dict = python_outputs[key].numpy()
lowerCamelCase__ : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowerCamelCase_, tf.intaa ) == tf_outputs_values ) )
@slow
def a__ (self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__ : Any = tf.function(lowerCamelCase_ )
for test_inputs in self.test_sentences:
lowerCamelCase__ : List[str] = tf.constant(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = compiled_tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Any = tf_tokenizer(lowerCamelCase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def a__ (self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__ : Dict = ModelToSave(tokenizer=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCamelCase__ : Tuple = model.serving(lowerCamelCase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : Any = Path(lowerCamelCase_ ) / 'saved.model'
tf.saved_model.save(lowerCamelCase_, lowerCamelCase_, signatures={'serving_default': model.serving} )
lowerCamelCase__ : List[str] = tf.saved_model.load(lowerCamelCase_ )
lowerCamelCase__ : List[str] = loaded_model.signatures['serving_default'](lowerCamelCase_ )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def a__ (self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCamelCase__ : Optional[int] = tf_tokenizer(lowerCamelCase_ ) # Build model with some sample inputs
lowerCamelCase__ : int = tf_tokenizer.get_config()
lowerCamelCase__ : List[Any] = TFGPTaTokenizer.from_config(lowerCamelCase_ )
lowerCamelCase__ : Tuple = model_from_config(lowerCamelCase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def a__ (self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCamelCase__ : Any = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
lowerCamelCase__ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCamelCase__ : Any = tf_tokenizer(lowerCamelCase_, max_length=lowerCamelCase_ )
lowerCamelCase__ : Any = out['input_ids'].numpy().shape[1]
assert out_length == max_length
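# Editor's note: these tests check parity between the Python GPT-2 tokenizer and
# its in-graph TF counterpart; they can be run with pytest, e.g.:
# python -m pytest path/to/this_test_file.py  (path is illustrative)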
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
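
# Usage sketch (editor's addition; the checkpoint name and 16 kHz waveform are
# assumptions for illustration):
# feature_extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
# tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
# processor = Speech2TextProcessor(feature_extractor, tokenizer)
# inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")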
| 696 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'pixel_values'
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , "timm" )
super().__init__(UpperCAmelCase )
A_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(UpperCAmelCase , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
A_ = getattr(UpperCAmelCase , "use_pretrained_backbone" , UpperCAmelCase )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
A_ = config.out_indices if getattr(UpperCAmelCase , "out_indices" , UpperCAmelCase ) is not None else (-1,)
A_ = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
A_ = self._backbone.return_layers
A_ = {layer["module"]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def __A ( cls : Optional[int] , UpperCAmelCase : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
A_ = kwargs.pop("config" , TimmBackboneConfig() )
A_ = kwargs.pop("use_timm_backbone" , UpperCAmelCase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
A_ = kwargs.pop("num_channels" , config.num_channels )
A_ = kwargs.pop("features_only" , config.features_only )
A_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
A_ = kwargs.pop("out_indices" , config.out_indices )
A_ = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : Optional[int] ):
pass
def __A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=None , **UpperCAmelCase : int ):
A_ = return_dict if return_dict is not None else self.config.use_return_dict
A_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
A_ = self._all_layers
A_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
A_ = self._return_layers
A_ = tuple(hidden_states[i] for i in self.out_indices )
else:
A_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
A_ = None
A_ = tuple(UpperCAmelCase )
A_ = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
A_ = (feature_maps,)
if output_hidden_states:
A_ = output + (hidden_states,)
return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 86 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : jnp.ndarray
_UpperCamelCase : jnp.ndarray
class A_ ( nn.Module ):
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(snake_case , axis=1 )
# 1. time
if not isinstance(snake_case , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.floataa )
lowercase = jnp.expand_dims(snake_case , 0 )
lowercase = self.time_proj(snake_case )
lowercase = self.time_embedding(snake_case )
# 2. pre-process
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.conv_in(snake_case )
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(snake_case )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
# 5. contronet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
lowercase = controlnet_block(snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(snake_case )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
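# Initialization sketch (editor's addition): in diffusers this module is
# FlaxControlNetModel; its parameters are built via the init helper defined above,
# roughly: params = model.init_weights(jax.random.PRNGKey(0))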
| 84 | 0 |
'''simple docstring'''
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # Note: this repository's variant swaps random *pairs*; the textbook
    # Fisher-Yates walks i from the end of the list and swaps with a random j <= i.
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[b], data[a] = data[a], data[b]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 305 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100,
        scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[str] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ : Any ={0: '''batch'''}
lowerCAmelCase_ : Any ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase_ : List[Any] ={0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase_ : int ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase_ : List[str] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] =self.num_layers
for i in range(UpperCamelCase_ ):
lowerCAmelCase_ : int ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase_ : List[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase_ : Optional[Any] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[str] =super().outputs
else:
lowerCAmelCase_ : Optional[Any] =super(UpperCamelCase_ , self ).outputs
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Dict =self.num_layers
for i in range(UpperCamelCase_ ):
lowerCAmelCase_ : Optional[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase_ : Optional[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __A ( self : int , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Generate decoder inputs
lowerCAmelCase_ : List[Any] =seq_length if not self.use_past else 1
lowerCAmelCase_ : Dict =self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase_ : Union[str, Any] ={F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase_ : List[Any] =dict(**UpperCamelCase_ , **UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Dict =common_inputs['''input_ids'''].shape
lowerCAmelCase_ : Tuple =common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase_ , lowerCAmelCase_ : Any =self.num_attention_heads
lowerCAmelCase_ : Optional[int] =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Optional[int] =decoder_seq_length + 3
lowerCAmelCase_ : List[Any] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase_ : Dict =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
lowerCAmelCase_ : int =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase_ , lowerCAmelCase_ : Dict =self.num_layers
lowerCAmelCase_ : Union[str, Any] =min(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase_ : Optional[Any] =max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
lowerCAmelCase_ : Union[str, Any] ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
lowerCAmelCase_ : List[str] =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def __A ( self : Optional[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : str =self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : int =seqlen + 2
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] =self.num_layers
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.num_attention_heads
lowerCAmelCase_ : Tuple =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Any =common_inputs['''attention_mask'''].dtype
lowerCAmelCase_ : List[str] =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
lowerCAmelCase_ : List[str] =[
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def __A ( self : List[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : Tuple =compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : List[Any] =tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
lowerCAmelCase_ : Tuple =compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : List[Any] =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase_ : Any =dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
return common_inputs
def __A ( self : List[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Optional[Any] =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
else:
lowerCAmelCase_ : int =self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
return common_inputs
def __A ( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Optional[Any] =super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
lowerCAmelCase_ : Dict =super(UpperCamelCase_ , self )._flatten_past_key_values_(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@property
def __A ( self : Union[str, Any] ):
return 1E-4
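
# Usage sketch (editor's addition):
# config = MarianConfig()   # defaults mirror the Helsinki-NLP/opus-mt-* checkpoints
# config.hidden_size        # resolved to d_model (1024) via attribute_map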
| 305 | 1 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count reversible numbers of the given total length (Project Euler 145),
    filling the digit list from both ends at once.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
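
# Known value (editor's note): for max_power=9, i.e. reversible numbers below
# one billion, solution() returns 608720 (Project Euler problem 145).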
if __name__ == "__main__":
print(F"""{solution() = }""")
| 446 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):  # original class name lost to obfuscation
    model_input_names = ["pixel_values"]

    # NOTE (editor): the parameter order below is reconstructed from the original
    # positional defaults and may differ from the source file.
    def __init__(
        self,
        do_resize: bool = True,
        size: dict = None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor=1 / 255,
        crop_size: dict = None,
        do_normalize: bool = True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a)
if not is_batched(__a):
_UpperCamelCase = [images]
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
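# Usage sketch (editor's addition; `ImageProcessor` stands in for whatever name
# this file originally defined):
# processor = ImageProcessor(size={"height": 224, "width": 224})
# batch = processor(images, return_tensors="pt")  # -> BatchFeature with "pixel_values"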
| 19 | 0 |
'''simple docstring'''
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
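# The canonical import after this deprecation (editor's note):
# from accelerate import find_executable_batch_size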
| 312 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet and Transformer-XL more state on short prompts
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self : List[str] , *_A : List[Any] , **_A : List[str] ):
'''simple docstring'''
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase__ : Any = None
if self.model.config.prefix is not None:
UpperCAmelCase__ : int = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase__ : List[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self._sanitize_parameters(prefix=_A , **self._forward_params )
UpperCAmelCase__ : Any = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase__ : Optional[int] = {**self._forward_params, **forward_params}
def lowercase_ ( self : Dict , _A : int=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Optional[int]=None , _A : Dict=None , _A : str=None , _A : List[str]=None , _A : List[str]=None , **_A : int , ):
'''simple docstring'''
UpperCAmelCase__ : Any = {}
if prefix is not None:
UpperCAmelCase__ : Optional[int] = prefix
if prefix:
UpperCAmelCase__ : Any = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase__ : Dict = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
UpperCAmelCase__ : Optional[int] = handle_long_generation
preprocess_params.update(_A )
UpperCAmelCase__ : Union[str, Any] = generate_kwargs
UpperCAmelCase__ : Optional[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase__ : Optional[int] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase__ : Union[str, Any] = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase__ : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase__ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase__ : Optional[int] = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase__ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase_ ( self : Tuple , *_A : List[Any] , **_A : Optional[int] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[Any] , _A : List[Any] , **_A : List[Any] ):
'''simple docstring'''
return super().__call__(_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : List[str]="" , _A : Optional[int]=None , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase__ : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase__ : Any = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase__ : str = generate_kwargs['''max_new_tokens''']
else:
UpperCAmelCase__ : Tuple = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase__ : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
UpperCAmelCase__ : Optional[int] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase__ : List[Any] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def lowercase_ ( self : List[Any] , _A : Any , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = model_inputs['''input_ids''']
UpperCAmelCase__ : Tuple = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Tuple = 1
else:
UpperCAmelCase__ : Union[str, Any] = input_ids.shape[0]
UpperCAmelCase__ : int = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase__ : Dict = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
UpperCAmelCase__ : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase__ : Optional[Any] = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase__ : List[Any] = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase__ : Optional[Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
UpperCAmelCase__ : Any = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase__ : Optional[int] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase__ : List[Any] = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase_ ( self : Any , _A : Dict , _A : Optional[int]=ReturnType.FULL_TEXT , _A : int=True ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = model_outputs['''generated_sequence'''][0]
UpperCAmelCase__ : Tuple = model_outputs['''input_ids''']
UpperCAmelCase__ : List[str] = model_outputs['''prompt_text''']
UpperCAmelCase__ : Any = generated_sequence.numpy().tolist()
UpperCAmelCase__ : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase__ : List[str] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase__ : List[str] = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase__ : Union[str, Any] = 0
else:
UpperCAmelCase__ : List[str] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase__ : Dict = prompt_text + text[prompt_length:]
else:
UpperCAmelCase__ : List[str] = text[prompt_length:]
UpperCAmelCase__ : Any = {'''generated_text''': all_text}
records.append(_A )
return records
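# Usage sketch (editor's addition): this class backs the "text-generation"
# pipeline, so the usual entry point is the pipeline factory:
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# generator("Hello, I'm a language model,", max_new_tokens=20)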
| 312 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None )


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
UpperCAmelCase_ = 1_92
UpperCAmelCase_ = 7_68
UpperCAmelCase_ = 12
UpperCAmelCase_ = 3
elif vit_name[9:].startswith("small" ):
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = 15_36
UpperCAmelCase_ = 12
UpperCAmelCase_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
UpperCAmelCase_ = 7_68
UpperCAmelCase_ = 23_04
UpperCAmelCase_ = 8
UpperCAmelCase_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
UpperCAmelCase_ = 10_24
UpperCAmelCase_ = 40_96
UpperCAmelCase_ = 24
UpperCAmelCase_ = 16
elif vit_name[4:].startswith("huge" ):
UpperCAmelCase_ = 12_80
UpperCAmelCase_ = 51_20
UpperCAmelCase_ = 32
UpperCAmelCase_ = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img(), return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
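
# Hedged usage sketch (the script filename is assumed; both flags are defined above):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224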
| 78 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    '''simple docstring'''
    return np.dot(vector, vector )
class SVC:
    def __init__(self, *,
        regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0, ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma" )
            if not isinstance(self.gamma, (float, int) ):
                raise ValueError("gamma must be float or int" )
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0" )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg )
    def __linear(self, vector1: ndarray, vector2: ndarray ) -> float:
        return np.dot(vector1, vector2 )

    def __rbf(self, vector1: ndarray, vector2: ndarray ) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2 )) )
    def fit(self, observations: list[ndarray], classes: ndarray ) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes )

        def to_minimize(candidate: ndarray ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )

        ly_constraint = LinearConstraint(classes, 0, 0 )
        l_bounds = Bounds(0, self.regularization )

        l_star = minimize(
            to_minimize, np.ones(n ), bounds=l_bounds, constraints=[ly_constraint] ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j] )
        self.offset = s / n
    def predict(self, observation: ndarray ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
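
# Hedged usage sketch (hypothetical, not part of the original module):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([0.0, -1.0]), np.asarray([0.0, -2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 3.0]))  # expected: 1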
| 78 | 1 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    '''simple docstring'''
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(__UpperCamelCase ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12" )

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )

    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31" )

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
    dt_ck = datetime.date(int(y ), int(m ), int(d ) )
# Start math
if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
UpperCAmelCase__ : str = F"Your date {date_input}, is a {days[str(__UpperCamelCase )]}!"
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
__UpperCAmelCase = parser.parse_args()
zeller(args.date_input)
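
# Illustrative check (a calendar fact, not a doctest from the original file):
# 2010-01-31 fell on a Sunday, so zeller("01-31-2010") should return
# "Your date 01-31-2010, is a Sunday!".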
| 194 |
"""simple docstring"""
def permute(nums):
    '''simple docstring'''
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2(nums):
    '''simple docstring'''
    def backtrack(start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start, len(nums ) ):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1 )
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0 )
    return output
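
# Illustrative (hedged) check: both permute and permute2 return all 3! = 6
# orderings of [1, 2, 3]; only the order of the 6 results differs between the
# pop/append strategy and the in-place-swap backtracking strategy.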
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
| 194 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    '''simple docstring'''
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
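
# Illustrative note: floats_list((2, 3)) yields a 2x3 nested list of
# pseudo-random floats in [0, scale), drawn from the module-level global_rng
# unless a dedicated rng is supplied.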
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained(self):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2 ) )
        self.assertEqual(dict_first, dict_second )
    def test_feat_extract_to_json_file(self):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2 ) )
        self.assertEqual(dict_first, dict_second )
    def test_call(self):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3 ) )

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3 ) )

        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200, (feature_extractor.n_samples + 500), 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3 ) )
    def test_double_precision_pad(self):
        '''simple docstring'''
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100, 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples(self, num_samples ):
        '''simple docstring'''
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on

        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
| 54 |
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0, len(data ) - 1 )
        b = random.randint(0, len(data ) - 1 )
        data[b], data[a] = data[a], data[b]
    return data
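
# The loop above swaps two uniformly random positions per step (the variant this
# file implements). For comparison only, a hedged sketch of the textbook
# Fisher-Yates walk (not from the original file) is:
def fisher_yates_shuffle_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # choose from the not-yet-fixed prefix data[:i + 1]
        data[i], data[j] = data[j], data[i]
    return data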
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 397 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
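
    # Illustrative note: this dict is unpacked into MobileViTImageProcessor in
    # the tests below, so each test exercises resize -> center-crop ->
    # (optional) channel-order flip (RGB to BGR) with the sizes configured above.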
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, "do_resize" ) )
        self.assertTrue(hasattr(image_processing, "size" ) )
        self.assertTrue(hasattr(image_processing, "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing, "center_crop" ) )
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order" ) )
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"shortest_edge": 20} )
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84} )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
| 710 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node

    first_solution.append(end_node )

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list ) >= size:
            tabu_list.pop(0 )

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )

    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
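
# Hedged usage sketch: generate_neighbours expects one whitespace-separated
# weighted edge per line (node node distance); a hypothetical edges.txt:
#   a b 20
#   a c 18
#   b c 10
# Run:  python tabu_search.py -f edges.txt -i 100 -s 5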
| 44 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]] ) -> None:
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function(self, t: float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree, i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ), 5 ) == 1
        return output_values
    def bezier_curve_function(self, t: float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01 ) -> None:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree ), )
        plt.scatter(x, y, color="red", label="Control Points" )
        plt.legend()
        plt.show()
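
    # Illustrative check: for the degree-1 curve BezierCurve([(1, 2), (3, 5)]),
    # bezier_curve_function(0.5) returns the segment midpoint (2.0, 3.5).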
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 507 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase ):
    def test_input_types(self):
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids, list ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input(self):
        """simple docstring"""
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def test_example_progression(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )

        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )

        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
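
    # Illustrative summary (not from the original file): DisjunctiveConstraint
    # walks a trie over the candidate sequences; update(token) returns
    # (stepped, completed, reset) and reset() re-arms the constraint, which is
    # exactly what the progression tests above exercise.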
| 507 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__(self, other ):
        return self[-1] < other[-1]

    def __eq__(self, other ):
        return self[-1] == other[-1]
def patience_sort(collection: list ) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element] )
        i = bisect_left(stacks, new_stacks )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stacks )

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
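
# Illustrative note: each Stack holds a non-increasing run, bisect_left drops a
# new element onto the leftmost stack whose top is >= it, and heapq.merge over
# the reversed (now increasing) stacks produces the sorted output.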
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted)) | 719 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase ):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )

    def _setup_tf_ckpt(self, save_dir ):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True )
        model_tf.save_pretrained(save_dir )
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework )
        self.assertEqual(framework, mock_framework )

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework )
            self.assertEqual(framework, mock_framework )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework )
            self.assertEqual(framework, mock_framework )
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework, self.framework_pt )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework, self.framework_tf )

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
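
    # Priority summary (illustrative): an explicit framework argument wins, then
    # the format of a local checkpoint, then the environment, with PyTorch
    # preferred when both backends are installed -- see the mocks below.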
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework, self.framework_pt )

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework, self.framework_tf )

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework, self.framework_pt )

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model ) | 664 | 0 |
def euclidean_distance_sqr(point1, point2):
    """simple docstring"""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """simple docstring"""
    return sorted(array, key=lambda point: point[column] )
def dis_between_closest_pair(points, points_counts, min_dis=float("inf" ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1, points_counts ):
            current_dis = euclidean_distance_sqr(points[i], points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf" ) ):
    """simple docstring"""
    for i in range(min(6, points_counts - 1 ), points_counts ):
        for j in range(max(0, i - 6 ), i ):
            current_dis = euclidean_distance_sqr(points[i], points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts )

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid )
    closest_pair_dis = min(closest_in_left, closest_in_right )

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip ), closest_pair_dis )
    return min(closest_pair_dis, closest_in_strip )
def closest_pair_of_points(points, points_counts):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points, column=0 )
    points_sorted_on_y = column_based_sort(points, column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts )
    ) ** 0.5
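
# Worked check (illustrative): for the sample points in __main__ below, the
# closest pair is (2, 3) and (3, 4), so the printed distance is sqrt(2) ~ 1.4142.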
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points))) | 86 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
SCREAMING_SNAKE_CASE : Union[str, Any] = "Create a default config file for Accelerate with only a few flags set."
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[Any]="no" , SCREAMING_SNAKE_CASE_ : str = default_json_config_file , SCREAMING_SNAKE_CASE_ : bool = False ):
"""simple docstring"""
a_ : Optional[Any] = Path(SCREAMING_SNAKE_CASE_ )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
a_ : Tuple = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
a_ : str = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
a_ : Union[str, Any] = torch.cuda.device_count()
a_ : Any = num_gpus
a_ : Union[str, Any] = False
if num_gpus > 1:
a_ : str = """MULTI_GPU"""
else:
a_ : List[str] = """NO"""
elif is_xpu_available() and use_xpu:
a_ : List[Any] = torch.xpu.device_count()
a_ : List[str] = num_xpus
a_ : List[Any] = False
if num_xpus > 1:
a_ : List[str] = """MULTI_XPU"""
else:
a_ : Union[str, Any] = """NO"""
elif is_npu_available():
a_ : Tuple = torch.npu.device_count()
a_ : Union[str, Any] = num_npus
a_ : List[Any] = False
if num_npus > 1:
a_ : List[str] = """MULTI_NPU"""
else:
a_ : Optional[int] = """NO"""
else:
a_ : List[str] = 0
a_ : Optional[int] = True
a_ : Optional[Any] = 1
a_ : List[str] = """NO"""
a_ : int = ClusterConfig(**SCREAMING_SNAKE_CASE_ )
config.to_json_file(SCREAMING_SNAKE_CASE_ )
return path
def default_command_parser(parser, parents):
    """simple docstring"""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location", )

    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command(args):
    """simple docstring"""
    config_file = write_basic_config(args.mixed_precision, args.save_location )
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""" )
| 419 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer, attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name, value, adapter, unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = full_name.split('''adaptor.''' )[-1]
SCREAMING_SNAKE_CASE_ = name.split('''.''' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE_ = int(items[1] )
else:
SCREAMING_SNAKE_CASE_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
SCREAMING_SNAKE_CASE_ = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
SCREAMING_SNAKE_CASE_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
SCREAMING_SNAKE_CASE_ = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
SCREAMING_SNAKE_CASE_ = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
SCREAMING_SNAKE_CASE_ = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
SCREAMING_SNAKE_CASE_ = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def a (emb ):
vocab_size , emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
lin_layer.weight.data = emb.weight.data
return lin_layer
@torch.no_grad()
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ = WavaVecaConfig.from_pretrained(
_lowerCAmelCase , add_adapter=_lowerCAmelCase , adapter_stride=_lowerCAmelCase , adapter_kernel_size=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , output_hidden_size=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = MBartConfig.from_pretrained(_lowerCAmelCase )
# load model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
SCREAMING_SNAKE_CASE_ = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase , use_auth_token=_lowerCAmelCase )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE_ = WavaVecaModel(_lowerCAmelCase )
recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
# load decoder weights
SCREAMING_SNAKE_CASE_ = MBartForCausalLM(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
SCREAMING_SNAKE_CASE_ = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = MBartaaTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE_ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_ = '''mbart50'''
SCREAMING_SNAKE_CASE_ = '''wav2vec2'''
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_ = 2_5_0_0_0_4
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_ = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250_004, type=int, help="""`decoder_start_token_id` of model config""")
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 716 |
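The decoder-loading step in the script above depends on load_state_dict(..., strict=False) returning the mismatched key lists instead of raising. A minimal stand-alone sketch of that pattern, using toy modules rather than the real wav2vec2/mBART classes:

import torch.nn as nn

src = nn.Linear(4, 4)                 # state dict keys: 'weight', 'bias'
dst = nn.Sequential(nn.Linear(4, 4))  # expects keys '0.weight', '0.bias'
missing, unexpected = dst.load_state_dict(src.state_dict(), strict=False)
print(missing)     # ['0.weight', '0.bias'], keys dst wanted but did not get
print(unexpected)  # ['weight', 'bias'], keys in the source dict with no match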
def a (_lowerCAmelCase ):
if number > 0:
raise ValueError('''input must be a negative integer''' )
SCREAMING_SNAKE_CASE_ = len(bin(_lowerCAmelCase )[3:] )
SCREAMING_SNAKE_CASE_ = bin(abs(_lowerCAmelCase ) - (1 << binary_number_length) )[3:]
SCREAMING_SNAKE_CASE_ = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_lowerCAmelCase ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 0 |
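Worked check of the two's-complement routine above, rewritten with descriptive names so each step is visible (the logic is unchanged):

def to_twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    bit_len = len(bin(number)[3:])                # magnitude bits, sign and '0b' skipped
    body = bin(abs(number) - (1 << bit_len))[3:]  # low bits of the complement
    out = "1" + "0" * (bit_len - len(body)) + body if number < 0 else "0"
    return "0b" + out

assert to_twos_complement(-5) == "0b1011"     # -5 in 4 bits
assert to_twos_complement(-17) == "0b101111"  # -17 in 6 bits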
from typing import Dict
from .base import GenericTensor, Pipeline
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if tokenize_kwargs is None:
__magic_name__ :List[str] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
__magic_name__ :List[Any] = truncation
__magic_name__ :Dict = tokenize_kwargs
__magic_name__ :str = {}
if return_tensors is not None:
__magic_name__ :Any = return_tensors
return preprocess_params, {}, postprocess_params
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = self.framework
__magic_name__ :Optional[Any] = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
return model_inputs
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model(**__lowerCAmelCase )
return model_outputs
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
| 0 |
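A minimal usage sketch of this pipeline through transformers' pipeline factory; the checkpoint name is an arbitrary example, not something this snippet prescribes:

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Hello world")
# nested list of shape (batch, tokens, hidden_size), e.g. 1 x 4 x 768 here
print(len(features), len(features[0]), len(features[0][0]))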
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ),
}
| 0 | 1 |
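For the binary case the score sklearn computes reduces to the familiar closed form over confusion-matrix counts; the counts below are invented purely for illustration:

from math import sqrt

tp, tn, fp, fn = 6, 3, 1, 2
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(round(mcc, 4))  # 0.4781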
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
snake_case__ : Optional[Any] = 1024
snake_case__ : Union[str, Any] = 4096
snake_case__ : Any = 24
snake_case__ : int = 16
snake_case__ : Dict = [5, 11, 17, 23]
snake_case__ : List[str] = [256, 512, 1024, 1024]
snake_case__ : Optional[Any] = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
snake_case__ : int = 768
snake_case__ : Tuple = [1, 1, 1, 0.5]
snake_case__ : Union[str, Any] = [256, 512, 768, 768]
snake_case__ : Optional[int] = 150
snake_case__ : Any = 16
snake_case__ : Optional[Any] = (1, 384, 384)
snake_case__ : int = False
snake_case__ : List[str] = 'project'
if "ade" in checkpoint_url:
snake_case__ : List[str] = True
snake_case__ : List[str] = 768
snake_case__ : Optional[int] = [1, 1, 1, 0.5]
snake_case__ : Optional[Any] = 150
snake_case__ : int = 16
snake_case__ : Any = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'ade20k-id2label.json'
snake_case__ : Tuple = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
snake_case__ : List[str] = {int(k ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def _lowerCAmelCase ( __lowerCAmelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Union[str, Any] = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(k , None )
def _lowerCAmelCase ( __lowerCAmelCase ) -> Any:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case__ : Union[str, Any] = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
snake_case__ : str = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
snake_case__ : Union[str, Any] = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
snake_case__ : Union[str, Any] = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
snake_case__ : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
snake_case__ : Optional[Any] = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
snake_case__ : Optional[int] = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
snake_case__ : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case__ : Any = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
snake_case__ : Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
snake_case__ : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
snake_case__ : List[str] = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
snake_case__ : Any = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
snake_case__ : List[Any] = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
snake_case__ : Optional[Any] = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
snake_case__ : List[str] = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
snake_case__ : Dict = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
snake_case__ : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case__ : Optional[Any] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
snake_case__ : Dict = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
snake_case__ : str = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
snake_case__ : Union[str, Any] = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
snake_case__ : Optional[Any] = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
snake_case__ : Tuple = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case__ : str = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case__ : Dict = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case__ : Tuple = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case__ : Optional[int] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case__ : Any = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
snake_case__ : Optional[int] = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
snake_case__ : str = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
snake_case__ : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
snake_case__ : Tuple = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
snake_case__ : str = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
snake_case__ : Optional[int] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
snake_case__ : List[str] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
snake_case__ : List[str] = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
snake_case__ : Optional[Any] = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
snake_case__ : Optional[int] = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
snake_case__ : List[Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
snake_case__ : List[str] = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
snake_case__ : List[str] = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
snake_case__ : Any = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
snake_case__ : List[str] = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
snake_case__ : int = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
snake_case__ : Optional[int] = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
snake_case__ : Union[str, Any] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
snake_case__ : int = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
snake_case__ : str = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : Any = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
snake_case__ : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[: config.hidden_size, :]
snake_case__ : Dict = in_proj_bias[: config.hidden_size]
snake_case__ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : str = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : int = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
snake_case__ : Any = torch.load(__lowerCAmelCase , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
snake_case__ : int = state_dict.pop(__lowerCAmelCase )
snake_case__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
snake_case__ : Optional[Any] = DPTForSemanticSegmentation(__lowerCAmelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
snake_case__ : str = 480 if 'ade' in checkpoint_url else 384
snake_case__ : Dict = DPTImageProcessor(size=__lowerCAmelCase )
snake_case__ : int = prepare_img()
snake_case__ : Optional[int] = image_processor(__lowerCAmelCase , return_tensors='''pt''' )
# forward pass
snake_case__ : List[Any] = model(**__lowerCAmelCase ).logits if 'ade' in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
snake_case__ : str = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
A__ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 717 |
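The index remapping called out as tricky in the refinenet comment above can be verified directly:

for layer_idx in (1, 2, 3, 4):
    print(f"refinenet{layer_idx} -> fusion_stage.layers.{abs(layer_idx - 4)}")
# refinenet4 maps to layers.0, refinenet3 to layers.1, and so on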
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class a :
def __init__( self :List[Any] ,__lowercase :Tuple ,__lowercase :List[Any]=1_3 ,__lowercase :List[Any]=7 ,__lowercase :int=True ,__lowercase :int=True ,__lowercase :Tuple=True ,__lowercase :int=True ,__lowercase :Dict=9_9 ,__lowercase :Any=3_2 ,__lowercase :Tuple=2 ,__lowercase :Union[str, Any]=4 ,__lowercase :Tuple=3_7 ,__lowercase :int="gelu" ,__lowercase :int=0.1 ,__lowercase :Dict=0.1 ,__lowercase :Optional[Any]=5_1_2 ,__lowercase :Optional[Any]=1_6 ,__lowercase :Optional[int]=2 ,__lowercase :Optional[int]=0.02 ,__lowercase :str=3 ,__lowercase :int=4 ,__lowercase :List[str]=None ,__lowercase :Union[str, Any]=0 ,):
snake_case__ : List[str] = parent
snake_case__ : int = batch_size
snake_case__ : Any = seq_length
snake_case__ : List[Any] = is_training
snake_case__ : str = use_input_mask
snake_case__ : str = use_token_type_ids
snake_case__ : Dict = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : Tuple = type_sequence_label_size
snake_case__ : Any = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : str = num_choices
snake_case__ : Optional[Any] = scope
snake_case__ : str = projection_dim
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Any = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ : List[Any] = None
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ : Optional[int] = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
snake_case__ : Optional[int] = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self :Tuple ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Tuple ):
snake_case__ : List[str] = TFDPRContextEncoder(config=__lowercase )
snake_case__ : Optional[int] = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase )
snake_case__ : Optional[int] = model(__lowercase ,token_type_ids=__lowercase )
snake_case__ : Dict = model(__lowercase )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :List[str] ,__lowercase :List[str] ,__lowercase :Optional[Any] ,__lowercase :int ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ):
snake_case__ : Dict = TFDPRQuestionEncoder(config=__lowercase )
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase )
snake_case__ : int = model(__lowercase ,token_type_ids=__lowercase )
snake_case__ : Optional[int] = model(__lowercase )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :List[Any] ,__lowercase :str ,__lowercase :Tuple ,__lowercase :Any ,__lowercase :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Tuple ):
snake_case__ : int = TFDPRReader(config=__lowercase )
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Optional[int] = config_and_inputs
snake_case__ : Optional[Any] = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Tuple = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : List[str] = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : int = False
__lowerCAmelCase : Optional[int] = False
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = TFDPRModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :List[str] ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowercase )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowercase )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Union[str, Any] = TFDPRContextEncoder.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Tuple = TFDPRContextEncoder.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Tuple = TFDPRQuestionEncoder.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = TFDPRReader.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_tf
class a ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
snake_case__ : Optional[int] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
snake_case__ : Union[str, Any] = model(__lowercase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
snake_case__ : Optional[Any] = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 219 | 0 |
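The integration test above accepts tiny numerical drift by comparing a slice of the output against hard-coded values with an absolute tolerance; the same check in isolation, with made-up numbers:

import numpy as np

output = np.array([[0.03236, 0.12753]])
expected = np.array([[0.03237, 0.12754]])
assert np.allclose(output, expected, rtol=0, atol=1e-4)      # 1e-5 drift passes
assert not np.allclose(output, expected, rtol=0, atol=1e-6)  # tighter bound fails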
class __a :
"""simple docstring"""
def __init__( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ ={}
def __A ( self : Tuple ,_UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
if vertex not in self.adjacency:
SCREAMING_SNAKE_CASE__ ={}
self.num_vertices += 1
def __A ( self : str ,_UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Any ) -> Any:
'''simple docstring'''
self.add_vertex(_UpperCamelCase )
self.add_vertex(_UpperCamelCase )
if head == tail:
return
SCREAMING_SNAKE_CASE__ =weight
SCREAMING_SNAKE_CASE__ =weight
def __A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
edges.remove((tail, head, weight) )
for i in range(len(edges ) ):
SCREAMING_SNAKE_CASE__ =list(edges[i] )
edges.sort(key=lambda e : e[2] )
for i in range(len(edges ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
SCREAMING_SNAKE_CASE__ =edges[i][2] + 1
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
SCREAMING_SNAKE_CASE__ =weight
SCREAMING_SNAKE_CASE__ =weight
def __str__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =""""""
for tail in self.adjacency:
for head in self.adjacency[tail]:
SCREAMING_SNAKE_CASE__ =self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip("""\n""" )
def __A ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __A ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def __A ( _UpperCamelCase : Union[str, Any]=None ,_UpperCamelCase : List[Any]=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =Graph()
if vertices is None:
SCREAMING_SNAKE_CASE__ =[]
if edges is None:
SCREAMING_SNAKE_CASE__ =[]
for vertex in vertices:
g.add_vertex(_UpperCamelCase )
for edge in edges:
g.add_edge(*_UpperCamelCase )
return g
class __a :
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ ={}
def __len__( self : int ) -> Optional[int]:
'''simple docstring'''
return len(self.parent )
def __A ( self : Any ,_UpperCamelCase : Any ) -> Optional[int]:
'''simple docstring'''
if item in self.parent:
return self.find(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =item
SCREAMING_SNAKE_CASE__ =0
return item
def __A ( self : Union[str, Any] ,_UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
if item not in self.parent:
return self.make_set(_UpperCamelCase )
if item != self.parent[item]:
SCREAMING_SNAKE_CASE__ =self.find(self.parent[item] )
return self.parent[item]
def __A ( self : Any ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.find(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =self.find(_UpperCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
SCREAMING_SNAKE_CASE__ =roota
return roota
if self.rank[roota] < self.rank[roota]:
SCREAMING_SNAKE_CASE__ =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
SCREAMING_SNAKE_CASE__ =roota
return roota
return None
@staticmethod
def __A ( _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =graph.num_vertices
SCREAMING_SNAKE_CASE__ =Graph.UnionFind()
SCREAMING_SNAKE_CASE__ =[]
while num_components > 1:
SCREAMING_SNAKE_CASE__ ={}
for vertex in graph.get_vertices():
SCREAMING_SNAKE_CASE__ =-1
SCREAMING_SNAKE_CASE__ =graph.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
edges.remove((tail, head, weight) )
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
SCREAMING_SNAKE_CASE__ =union_find.find(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =union_find.find(_UpperCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =cheap_edge[vertex]
if union_find.find(_UpperCamelCase ) != union_find.find(_UpperCamelCase ):
union_find.union(_UpperCamelCase ,_UpperCamelCase )
mst_edges.append(cheap_edge[vertex] )
SCREAMING_SNAKE_CASE__ =num_components - 1
SCREAMING_SNAKE_CASE__ =Graph.build(edges=_UpperCamelCase )
return mst
| 151 |
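For reference, the find/union scheme used above (path compression plus union by rank) in a compact, self-contained form; names here are illustrative:

class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def find(self, item):
        if item not in self.parent:         # make_set on first sight
            self.parent[item] = item
            self.rank[item] = 0
        if item != self.parent[item]:       # path compression
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, a, b):
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return root_a
        if self.rank[root_a] < self.rank[root_b]:  # attach the shorter tree
            root_a, root_b = root_b, root_a
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
        self.parent[root_b] = root_a
        return root_a

uf = UnionFind()
uf.union(1, 2)
uf.union(2, 3)
assert uf.find(1) == uf.find(3)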
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCAmelCase_ ( __UpperCamelCase="" ):
SCREAMING_SNAKE_CASE__ =tempfile.mkdtemp()
return os.path.join(__UpperCamelCase, str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =torch.rand(1_2 ,dtype=torch.floataa ) - 0.5
SCREAMING_SNAKE_CASE__ =AgentAudio(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_UpperCamelCase ,agent_type.to_raw() ,atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_UpperCamelCase ) )
# Ensure that the file contains the same value as the original tensor
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =sf.read(_UpperCamelCase )
self.assertTrue(torch.allclose(_UpperCamelCase ,torch.tensor(_UpperCamelCase ) ,atol=1e-4 ) )
def __A ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =torch.rand(1_2 ,dtype=torch.floataa ) - 0.5
SCREAMING_SNAKE_CASE__ =get_new_path(suffix=""".wav""" )
sf.write(_UpperCamelCase ,_UpperCamelCase ,1_6_0_0_0 )
SCREAMING_SNAKE_CASE__ =AgentAudio(_UpperCamelCase )
self.assertTrue(torch.allclose(_UpperCamelCase ,agent_type.to_raw() ,atol=1e-4 ) )
self.assertEqual(agent_type.to_string() ,_UpperCamelCase )
@require_vision
@require_torch
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =torch.randint(0 ,2_5_6 ,(6_4, 6_4, 3) )
SCREAMING_SNAKE_CASE__ =AgentImage(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_UpperCamelCase ,agent_type._tensor ,atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_UpperCamelCase ) )
def __A ( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
SCREAMING_SNAKE_CASE__ =Image.open(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =AgentImage(_UpperCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_UpperCamelCase ) )
def __A ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
SCREAMING_SNAKE_CASE__ =Image.open(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =AgentImage(_UpperCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_UpperCamelCase ) )
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""Hey!"""
SCREAMING_SNAKE_CASE__ =AgentText(_UpperCamelCase )
self.assertEqual(_UpperCamelCase ,agent_type.to_string() )
self.assertEqual(_UpperCamelCase ,agent_type.to_raw() )
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
| 151 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE( _snake_case ):
A_ : Dict = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'CLIPImageProcessor'
A_ : Union[str, Any] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self : int , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE__ :Optional[int] = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE__ :Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self : Optional[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[str] ) -> Optional[Any]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE__ :int = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
SCREAMING_SNAKE_CASE__ :Tuple = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ :Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def __lowerCamelCase ( self : Optional[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __lowerCamelCase ( self : Optional[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : str ) -> Union[str, Any]:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __lowerCamelCase ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :str = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 701 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _SCREAMING_SNAKE_CASE:
A_ : int
A_ : int
class _SCREAMING_SNAKE_CASE:
def __init__( self : str , UpperCamelCase_ : int ) -> Any:
SCREAMING_SNAKE_CASE__ :list[list[Edge]] = [[] for _ in range(UpperCamelCase_ )]
SCREAMING_SNAKE_CASE__ :List[Any] = size
def __getitem__( self : Optional[Any] , UpperCamelCase_ : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def __lowerCamelCase ( self : Optional[int] ) -> Any:
return self._size
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Dict:
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(UpperCamelCase_ , UpperCamelCase_ ) )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> int | None:
SCREAMING_SNAKE_CASE__ :int = deque([start_vertex] )
SCREAMING_SNAKE_CASE__ :list[int | None] = [None] * self.size
SCREAMING_SNAKE_CASE__ :List[str] = 0
while queue:
SCREAMING_SNAKE_CASE__ :Any = queue.popleft()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE__ :Tuple = current_distance + edge.weight
SCREAMING_SNAKE_CASE__ :str = distances[edge.destination_vertex]
if (
isinstance(UpperCamelCase_ , UpperCamelCase_ )
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE__ :Optional[int] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 | 0 |
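The deque logic above is 0-1 BFS: weight-0 edges go to the front and weight-1 edges to the back, so the deque stays ordered by distance and shortest paths come out in O(V + E). A self-contained sketch of the same idea on a toy graph (names are illustrative):

from collections import deque

def zero_one_bfs(adj, start, goal):
    # adj[v] is a list of (to_vertex, weight) pairs with weight in {0, 1}
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        v = dq.popleft()
        for to, w in adj[v]:
            nd = dist[v] + w
            if dist[to] is None or nd < dist[to]:
                dist[to] = nd
                if w == 0:
                    dq.appendleft(to)  # zero-weight edge: same distance layer
                else:
                    dq.append(to)      # unit-weight edge: next layer
    return dist[goal]

adj = [[(1, 1)], [(2, 0)], [(3, 1)], []]  # 0 -1-> 1 -0-> 2 -1-> 3
assert zero_one_bfs(adj, 0, 3) == 2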
"""simple docstring"""
def __lowerCamelCase ( number : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
lowerCamelCase_ = int(input("Enter number: ").strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 498 |
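The predicate above can be sanity-checked against the first perfect numbers (6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14); the helper below restates it with a readable name:

def is_perfect(n: int) -> bool:
    # proper divisors of n are at most n // 2, hence the range bound
    return sum(i for i in range(1, n // 2 + 1) if n % i == 0) == n

assert [n for n in range(2, 500) if is_perfect(n)] == [6, 28, 496]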
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCamelCase_ = 1.0_5457_1817e-34 # unit of ℏ : J * s
lowerCamelCase_ = 3e8 # unit of c : m * s^-1
def SCREAMING_SNAKE_CASE_ ( __A : float , __A : float , __A : float ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_SCREAMING_SNAKE_CASE = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_40 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_SCREAMING_SNAKE_CASE = (2_40 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_SCREAMING_SNAKE_CASE = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 418 | 0 |
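A numeric spot check of the relation implemented above: fix area and distance, solve for the force, then invert to recover the distance (the plate dimensions are made up):

from math import pi

HBAR, C = 1.054571817e-34, 3e8
area, distance = 4e-4, 1e-6   # e.g. 2 cm x 2 cm plates, 1 micrometre apart
force = (HBAR * C * pi**2 * area) / (240 * distance**4)
print(f"{force:.2e} N")       # about 5.20e-07 N
back = ((HBAR * C * pi**2 * area) / (240 * force)) ** 0.25
assert abs(back - distance) < 1e-12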
'''simple docstring'''
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
SCREAMING_SNAKE_CASE : Optional[int] = 4
SCREAMING_SNAKE_CASE : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 79 |
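Tracing the loop above for p = 7 (so m = 2**7 - 1 = 127) shows the sequence hitting 0 after p - 2 = 5 steps:

p = 7
m = (1 << p) - 1  # 127
s = 4
seq = []
for _ in range(p - 2):
    s = (s * s - 2) % m
    seq.append(s)
print(seq)     # [14, 67, 42, 111, 0]
print(s == 0)  # True: 2**7 - 1 is a Mersenne prime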
'''simple docstring'''
__UpperCAmelCase = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 79 | 1 |
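An illustrative use of two of the exported feature types; the schema below is made up for the example:

from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
print(features["label"].str2int("pos"))  # 1
print(features["label"].int2str(0))      # 'neg'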
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : int=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Optional[int]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ''
else:
__lowerCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def a_ ( lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(k, None )
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : int, lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[Any]=True ):
__lowerCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
__lowerCAmelCase = 8
# set labels if required
if not base_model:
__lowerCAmelCase = 1000
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 6
# load original model from torch hub
__lowerCAmelCase = torch.hub.load('facebookresearch/dino:main', lowerCAmelCase_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_, base_model=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# load HuggingFace model
if base_model:
__lowerCAmelCase = ViTModel(lowerCAmelCase_, add_pooling_layer=lowerCAmelCase_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor
__lowerCAmelCase = ViTImageProcessor()
__lowerCAmelCase = image_processor(images=prepare_img(), return_tensors='pt' )
__lowerCAmelCase = encoding['pixel_values']
__lowerCAmelCase = model(lowerCAmelCase_ )
if base_model:
__lowerCAmelCase = original_model(lowerCAmelCase_ )
assert torch.allclose(lowerCAmelCase_, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
__lowerCAmelCase = original_model(lowerCAmelCase_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_, outputs.logits, atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
_snake_case : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 53 |
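read_in_q_k_v above carves a fused qkv projection into equal query/key/value thirds; the slicing pattern in isolation, at a toy size:

import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)  # fused q/k/v, as in timm
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)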
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values):
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
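# Minimal usage sketch (an illustration, not part of the test suite): a forward
# pass with a randomly initialized FlaxRegNetModel. The config values mirror the
# tester defaults above; no pretrained checkpoint is assumed.
#
# if is_flax_available():
#     import numpy as np
#     cfg = RegNetConfig(num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1])
#     m = FlaxRegNetModel(cfg)
#     out = m(np.zeros((1, 3, 32, 32), dtype=np.float32))
#     # out.last_hidden_state has shape (1, 40, 1, 1): 32 // 32 = 1 per spatial dim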
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_bridgetower': [
        'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BridgeTowerConfig',
        'BridgeTowerTextConfig',
        'BridgeTowerVisionConfig',
    ],
    'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
        'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BridgeTowerForContrastiveLearning',
        'BridgeTowerForImageAndTextRetrieval',
        'BridgeTowerForMaskedLM',
        'BridgeTowerModel',
        'BridgeTowerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
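# Note on the lazy-import pattern above: at runtime the module object is
# replaced by a _LazyModule, so a statement such as
#     from transformers.models.bridgetower import BridgeTowerConfig
# only triggers the import of configuration_bridgetower when the attribute is
# first accessed; the TYPE_CHECKING branch gives static analyzers the real
# imports. (Illustration only; the import path assumes the usual transformers
# package layout.)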
| 315 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Union[str, Any] = 16
__lowercase : int = 32
def bamb(x):
    # Convert a byte count to megabytes (MiB)
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
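# Example invocation (hypothetical script name and paths; any argparse option
# defined above applies):
#
#   accelerate launch peak_memory_tracker.py --model_name_or_path bert-base-cased \
#       --n_train 320 --n_val 160 --num_epochs 1 --output_dir ./mem_logs
#
# The per-epoch peak-memory numbers end up in
# <output_dir>/peak_memory_utilization.json on the main process.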
| 315 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
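# For reference, the denoising loop exercised by the full-loop tests above,
# written out as a minimal standalone sketch (assumes only diffusers and torch;
# the residual here is a placeholder for a trained model's output):
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   sample = torch.randn(1, 3, 8, 8)
#   generator = torch.manual_seed(0)
#   for t in reversed(range(scheduler.config.num_train_timesteps)):
#       residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
#       sample = scheduler.step(residual, t, sample, generator=generator).prev_sample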
| 33 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))
    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )
    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
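# Shape walkthrough for PaintByExampleImageEncoder.forward (illustration):
# pixel_values (B, 3, H, W) -> CLIP pooler_output (B, hidden) -> [:, None]
# adds a sequence axis (B, 1, hidden) -> mapper preserves the shape ->
# proj_out maps to (B, 1, proj_size). uncond_vector, shape (1, 1, proj_size),
# is the learned unconditional embedding used for classifier-free guidance.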
| 508 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
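# The point of batch_step_no_noise, as exercised above: with eta = 0.0 DDIM is
# deterministic, so several (sample, timestep) pairs can be flattened into one
# batch and denoised in a single call instead of looping over scheduler.step.
# Sketch (shapes only, assuming k stacked, identically-shaped samples):
#
#   samples = torch.stack([s1, s2, s3], dim=0)            # (3, B, C, H, W)
#   out = scheduler.batch_step_no_noise(
#       residual, ts.flatten(0, 1), samples.flatten(0, 1), 0.0
#   )                                                      # (3 * B, C, H, W)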
| 719 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)
    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )
    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )
    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )
    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )
    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )
    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 388 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
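# Construction sketch (illustration only): the defaults above reproduce a
# RoBERTa-style base layout, and any field can be overridden by keyword.
#
#   config = Data2VecTextConfig(vocab_size=30522, num_hidden_layers=6)
#   assert config.model_type == "data2vec-text"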
| 245 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
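# Worked example: for n = 13195 the factor loop strips 5, 7, 13 and finally 29,
# so solution(13195) == 29. For the default n = 600851475143 (Project Euler
# problem 3) the answer is 6857.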
| 245 | 1 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 718 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
A_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ = '''resnet101'''
if "dc5" in model_name:
A_ = True
A_ = '''panoptic''' in model_name
if is_panoptic:
A_ = 2_50
else:
A_ = 91
A_ = '''huggingface/label-files'''
A_ = '''coco-detection-id2label.json'''
A_ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
# load image processor
A_ = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
A_ = ConditionalDetrImageProcessor(format=__UpperCamelCase )
# prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase , return_tensors='''pt''' )
A_ = encoding['''pixel_values''']
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ = torch.hub.load('''DeppMeng/ConditionalDETR''' , __UpperCamelCase , pretrained=__UpperCamelCase ).eval()
A_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ = '''conditional_detr.''' + src
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A_ = rename_backbone_keys(__UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__UpperCamelCase , is_panoptic=__UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
A_ = state_dict.pop(__UpperCamelCase )
A_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ = state_dict.pop(__UpperCamelCase )
A_ = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
A_ = state_dict.pop(__UpperCamelCase )
A_ = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ = state_dict.pop(__UpperCamelCase )
A_ = val
# finally, create HuggingFace model and load state dict
A_ = ConditionalDetrForSegmentation(__UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=__UpperCamelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
A_ = conditional_detr(__UpperCamelCase )
A_ = model(__UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 141 |
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
SCREAMING_SNAKE_CASE : str = 701
SCREAMING_SNAKE_CASE : int = 10_0000_0000
SCREAMING_SNAKE_CASE : Optional[Any] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 141 | 1 |
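# Worked example: binary_exponentiation(2, 10, 1000) == 24, since 2**10 = 1024
# and 1024 % 1000 == 24. The (a / b) % p identity checked above is Fermat's
# little theorem: for prime p and b not divisible by p, b**(p-2) is the modular
# inverse of b modulo p.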
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 430 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 430 | 1 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
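# Worked example: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# top-left-to-bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, so
# min_path_sum(...) returns 7. Note the function mutates `matrix` in place.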
| 438 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 196 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
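# The __init__ above uses the optional-dependency import pattern: probe the
# required backends inside try/except and fall back to dummy objects when a
# backend is missing. A minimal standalone sketch of the same idea (names
# are illustrative, not the library's):
try:
    import torch  # noqa: F401
    _TORCH_AVAILABLE = True
except ImportError:
    _TORCH_AVAILABLE = False

if not _TORCH_AVAILABLE:
    class SpectrogramDiffusionPipeline:  # dummy placeholder; raises on use
        def __init__(self, *args, **kwargs):
            raise ImportError("This pipeline requires torch to be installed.")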
| 286 | import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
a_ = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def __lowerCAmelCase ( A_ : Optional[int] ) -> str:
__UpperCAmelCase = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(A_ , A_ )
a_ = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def __lowerCAmelCase ( A_ : str ) -> List[str]:
__UpperCAmelCase = list(s_dict.keys() )
for key in keys:
__UpperCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__UpperCAmelCase = new_key.replace(A_ , A_ )
print(F'''{key} -> {new_key}''' )
__UpperCAmelCase = s_dict.pop(A_ )
return s_dict
def __lowerCAmelCase ( A_ : List[Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase = emb.weight.shape
__UpperCAmelCase = nn.Linear(A_ , A_ , bias=A_ )
__UpperCAmelCase = emb.weight.data
return lin_layer
def __lowerCAmelCase ( A_ : str , A_ : str ) -> bytes:
os.makedirs(A_ , exist_ok=A_ )
__UpperCAmelCase = os.path.basename(A_ )
__UpperCAmelCase = url.split("/" )[-2]
__UpperCAmelCase = os.path.join(A_ , A_ )
if os.path.exists(A_ ) and not os.path.isfile(A_ ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(A_ ):
__UpperCAmelCase = open(A_ , "rb" ).read()
if hashlib.sha256(A_ ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(A_ ) as source, open(A_ , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=A_ , unit_divisor=10_24 ) as loop:
while True:
__UpperCAmelCase = source.read(81_92 )
if not buffer:
break
output.write(A_ )
loop.update(len(A_ ) )
__UpperCAmelCase = open(A_ , "rb" ).read()
if hashlib.sha256(A_ ).hexdigest() != expected_sha256:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
return model_bytes
def __lowerCAmelCase ( A_ : Dict , A_ : Optional[Any] ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
__UpperCAmelCase = _download(_MODELS[checkpoint_path] )
else:
__UpperCAmelCase = torch.load(A_ , map_location="cpu" )
__UpperCAmelCase = original_checkpoint["dims"]
__UpperCAmelCase = original_checkpoint["model_state_dict"]
__UpperCAmelCase = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(A_ )
rename_keys(A_ )
__UpperCAmelCase = True
__UpperCAmelCase = state_dict["decoder.layers.0.fc1.weight"].shape[0]
__UpperCAmelCase = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=A_ , decoder_ffn_dim=A_ , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
__UpperCAmelCase = WhisperForConditionalGeneration(A_ )
__UpperCAmelCase , __UpperCAmelCase = model.model.load_state_dict(A_ , strict=A_ )
if len(A_ ) > 0 and not set(A_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
__UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__UpperCAmelCase = proj_out_weights
model.save_pretrained(A_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 286 | 1 |
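# The _download helper in the Whisper conversion script above implements a
# cached download verified by a SHA-256 checksum taken from the URL path.
# A self-contained sketch of that pattern (names assumed, progress bar
# omitted):
import hashlib
import os
import urllib.request

def cached_download(url: str, root: str, expected_sha256: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    target = os.path.join(root, os.path.basename(url))
    if os.path.isfile(target):
        data = open(target, "rb").read()
        if hashlib.sha256(data).hexdigest() == expected_sha256:
            return data  # cache hit with a matching checksum
    with urllib.request.urlopen(url) as source, open(target, "wb") as output:
        output.write(source.read())
    data = open(target, "rb").read()
    if hashlib.sha256(data).hexdigest() != expected_sha256:
        raise RuntimeError("Checksum mismatch after download; please retry.")
    return data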
"""simple docstring"""
A_ : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A_ : Tuple = [{"type": "code", "content": INSTALL_CONTENT}]
A_ : Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 196 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowerCamelCase (unittest.TestCase ):
def __init__( self : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any]=1_3 , __UpperCAmelCase : Tuple=3_0 , __UpperCAmelCase : str=2 , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Optional[Any]=3_2 , __UpperCAmelCase : Any=5 , __UpperCAmelCase : Optional[Any]=4 , __UpperCAmelCase : Tuple=3_7 , __UpperCAmelCase : List[str]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : int=1_0 , __UpperCAmelCase : Any=0.02 , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = FlaxViTModel(config=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE__ = (self.patch_size, self.patch_size)
SCREAMING_SNAKE_CASE__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = FlaxViTForImageClassification(config=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = FlaxViTForImageClassification(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase (A__ ,unittest.TestCase ):
lowerCamelCase__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE ( self : int ) -> None:
SCREAMING_SNAKE_CASE__ = FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
@jax.jit
def model_jitted(__UpperCAmelCase : int , **__UpperCAmelCase : Tuple ):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
SCREAMING_SNAKE_CASE__ = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
SCREAMING_SNAKE_CASE__ = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 196 | 1 |
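# The ViT tester above derives its sequence length from the patch grid; the
# arithmetic, spelled out with the tester's default sizes:
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # +1 for the [CLS] token
assert seq_length == 226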
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = 1
__a = 3
__a = (3_2, 3_2)
__a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
torch.manual_seed(0 )
__a = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(UpperCAmelCase )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class a__ :
def __init__( self ) -> Any:
__a = torch.ones([0] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[Any]:
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a = self.dummy_cond_unet
__a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
__a = self.dummy_vae
__a = self.dummy_text_encoder
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# construct the pipeline (a DDIM scheduler is used in this test)
__a = StableDiffusionPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = 'A painting of a squirrel eating a burger'
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
__a = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__a = output.images
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__a = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a = self.dummy_cond_unet
__a = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
__a = self.dummy_vae
__a = self.dummy_text_encoder
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__a = StableDiffusionPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = 'A painting of a squirrel eating a burger'
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
__a = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__a = output.images
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__a = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase )
assert isinstance(UpperCAmelCase , UpperCAmelCase )
assert isinstance(pipe.scheduler , UpperCAmelCase )
assert pipe.safety_checker is None
__a = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase )
__a = StableDiffusionPipeline.from_pretrained(UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__a = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.dummy_cond_unet
__a = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
__a = self.dummy_vae
__a = self.dummy_text_encoder
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
__a = unet.half()
__a = vae.half()
__a = bert.half()
# make sure here that pndm scheduler skips prk
__a = StableDiffusionPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = 'A painting of a squirrel eating a burger'
__a = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase )
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
__a = 4_0_0_3_6_6_0_3_4_6
__a = 7
# without safety guidance (sld_guidance_scale = 0)
__a = torch.manual_seed(UpperCAmelCase )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
__a = torch.manual_seed(UpperCAmelCase )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase )
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = 'padme amidala taking a bath artwork, safe for work, no nudity'
__a = 2_7_3_4_9_7_1_7_5_5
__a = 7
__a = torch.manual_seed(UpperCAmelCase )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
__a = torch.manual_seed(UpperCAmelCase )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
__a = 1_0_4_4_3_5_5_2_3_4
__a = 1_2
__a = torch.manual_seed(UpperCAmelCase )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
__a = torch.manual_seed(UpperCAmelCase )
__a = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
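# The safety-checker tests above pin a 3x3 corner slice of the generated
# image against reference values. The comparison pattern, in miniature:
import numpy as np

image = np.zeros((1, 64, 64, 3))      # (batch, height, width, channels)
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 of the last channel
expected_slice = np.zeros(9)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2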
| 246 | def lowerCAmelCase( __lowerCamelCase ):
__a = len(__lowerCamelCase )
while cur > 1:
# Find the maximum number in arr
__a = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__a = arr[mi::-1] + arr[mi + 1 : len(__lowerCamelCase )]
# Reverse whole list
__a = arr[cur - 1 :: -1] + arr[cur : len(__lowerCamelCase )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase_ : Optional[int] = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 246 | 1 |
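# The pancake sort above repeatedly flips prefixes: bring the largest
# unsorted value to the front, then flip it into its final position. A
# readable sketch with assumed names:
def pancake_sort(arr: list[int]) -> list[int]:
    n = len(arr)
    while n > 1:
        mi = arr.index(max(arr[:n]))        # largest value still unsorted
        arr = arr[mi::-1] + arr[mi + 1 :]   # flip it to the front
        arr = arr[n - 1 :: -1] + arr[n:]    # flip the prefix into place
        n -= 1
    return arr

assert pancake_sort([3, 1, 2]) == [1, 2, 3]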
from typing import List
import numpy as np
def lowerCamelCase ( UpperCamelCase : dict ) -> int:
_lowerCamelCase = {key: len(UpperCamelCase ) for key, value in gen_kwargs.items() if isinstance(UpperCamelCase , UpperCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , UpperCamelCase )
def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : int ) -> List[range]:
_lowerCamelCase = []
for group_idx in range(UpperCamelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(UpperCamelCase , start + num_shards_to_add )
shards_indices_per_group.append(UpperCamelCase )
return shards_indices_per_group
def lowerCamelCase ( UpperCamelCase : dict , UpperCamelCase : int ) -> List[dict]:
_lowerCamelCase = _number_of_shards_in_gen_kwargs(UpperCamelCase )
if num_shards == 1:
return [dict(UpperCamelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=UpperCamelCase , max_num_jobs=UpperCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(UpperCamelCase , UpperCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(UpperCamelCase ) )
]
def lowerCamelCase ( UpperCamelCase : List[dict] ) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , UpperCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCamelCase ( UpperCamelCase : np.random.Generator , UpperCamelCase : dict ) -> dict:
_lowerCamelCase = {len(UpperCamelCase ) for value in gen_kwargs.values() if isinstance(UpperCamelCase , UpperCamelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(UpperCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(UpperCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(UpperCamelCase , UpperCamelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(UpperCamelCase )]]
return shuffled_kwargs | 544 | import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def _snake_case ( *snake_case__ : Optional[int] , **snake_case__ : Tuple ) -> Optional[Any]:
pass
def lowerCamelCase ( UpperCamelCase : str ) -> Tuple:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
A = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _snake_case ( self : str , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] ) -> Dict:
_lowerCamelCase = pipeline(
'document-question-answering' , model=snake_case__ , tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
_lowerCamelCase = 'What is the placebo?'
_lowerCamelCase = [
{
'image': load_image(snake_case__ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def _snake_case ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> Tuple:
_lowerCamelCase = dqa_pipeline(snake_case__ , top_k=2 )
self.assertEqual(
snake_case__ , [
[
{'score': ANY(snake_case__ ), 'answer': ANY(snake_case__ ), 'start': ANY(snake_case__ ), 'end': ANY(snake_case__ )},
{'score': ANY(snake_case__ ), 'answer': ANY(snake_case__ ), 'start': ANY(snake_case__ ), 'end': ANY(snake_case__ )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def _snake_case ( self : Union[str, Any] ) -> Optional[int]:
_lowerCamelCase = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'How many cats are there?'
_lowerCamelCase = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
]
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , snake_case__ )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , snake_case__ )
# This image contains no detectable text, so layoutlmv2 should fail
# and return an empty answer.
_lowerCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(snake_case__ , [] )
# We can optionally pass the words and bounding boxes directly
_lowerCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , words=snake_case__ , boxes=snake_case__ , top_k=2 )
self.assertEqual(snake_case__ , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def _snake_case ( self : Optional[int] ) -> List[Any]:
_lowerCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def _snake_case ( self : int ) -> Optional[Any]:
_lowerCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : int ) -> List[Any]:
_lowerCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case__ )
_lowerCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case__ , revision='3dc6de3' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ) -> List[str]:
_lowerCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case__ )
_lowerCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case__ , revision='3dc6de3' , max_seq_len=5_0 , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def _snake_case ( self : Dict ) -> int:
_lowerCamelCase = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def _snake_case ( self : Optional[Any] ) -> Any:
pass | 544 | 1 |
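# _distribute_shards in the first snippet of the pair above splits num_shards
# as evenly as possible over max_num_jobs, giving the remainder to the first
# groups. A worked check of that logic, reimplemented under assumed names:
def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups: list[range] = []
    start = 0
    for group_idx in range(max_num_jobs):
        add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if add == 0:
            break
        groups.append(range(start, start + add))
        start += add
    return groups

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]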
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , A_ : List[Any] , A_ : Optional[Any]=13 , A_ : Any=7 , A_ : Union[str, Any]=True , A_ : Optional[Any]=True , A_ : Dict=True , A_ : Optional[int]=True , A_ : Optional[int]=99 , A_ : int=32 , A_ : Tuple=2 , A_ : Union[str, Any]=4 , A_ : Tuple=37 , A_ : Union[str, Any]="gelu" , A_ : Union[str, Any]=0.1 , A_ : int=0.1 , A_ : List[str]=5_12 , A_ : Dict=16 , A_ : str=2 , A_ : Any=0.02 , A_ : Any=3 , A_ : Any=4 , A_ : int=None , )-> Dict:
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = 99
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 37
__UpperCamelCase = "gelu"
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_12
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def A ( self : List[str] )-> List[Any]:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[Any] , A_ : List[str] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Dict , A_ : Union[str, Any] , A_ : str , A_ : Tuple )-> Union[str, Any]:
__UpperCamelCase = TFRoFormerModel(config=A_ )
__UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(A_ )
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : str , A_ : Tuple , A_ : List[str] , A_ : Dict , A_ : Any , A_ : Dict , A_ : str , A_ : List[Any] )-> List[Any]:
__UpperCamelCase = True
__UpperCamelCase = TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase = model(A_ )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self : Any , A_ : Optional[Any] , A_ : List[Any] , A_ : str , A_ : Dict , A_ : List[Any] , A_ : Any , A_ : Optional[int] )-> str:
__UpperCamelCase = TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[str] , A_ : Any , A_ : Any , A_ : Optional[Any] , A_ : Optional[int] , A_ : Optional[int] , A_ : Optional[int] , A_ : List[str] )-> List[str]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : List[Any] , A_ : str , A_ : Dict , A_ : List[Any] , A_ : List[Any] , A_ : Any , A_ : Optional[int] , A_ : Tuple )-> List[str]:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : List[Any] , A_ : Union[str, Any] , A_ : Optional[int] , A_ : Any , A_ : Tuple , A_ : Tuple , A_ : Dict , A_ : Optional[int] )-> List[str]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : int , A_ : Optional[Any] , A_ : Tuple , A_ : Any , A_ : Dict , A_ : str , A_ : Dict , A_ : Optional[int] )-> Optional[Any]:
__UpperCamelCase = TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : int )-> List[str]:
__UpperCamelCase = self.prepare_config_and_inputs()
( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) = config_and_inputs
__UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[str] = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : str = False
_snake_case : Union[str, Any] = False
def A ( self : List[str] , A_ : Union[str, Any] , A_ : List[str] , A_ : List[Any] , A_ : Dict , A_ : Union[str, Any] )-> Any:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self : Tuple )-> Tuple:
__UpperCamelCase = TFRoFormerModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def A ( self : Tuple )-> List[Any]:
self.config_tester.run_common_tests()
def A ( self : Dict )-> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def A ( self : int )-> str:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def A ( self : List[str] )-> List[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def A ( self : int )-> List[str]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def A ( self : Any )-> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def A ( self : Tuple )-> str:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def A ( self : Optional[int] )-> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def A ( self : int )-> int:
__UpperCamelCase = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(A_ )
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Dict )-> Tuple:
__UpperCamelCase = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase = 5_00_00
__UpperCamelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = 1E-4
def A ( self : str )-> Tuple:
__UpperCamelCase = tf.constant([[4, 10]] )
__UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase = emba(input_ids.shape )
__UpperCamelCase = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def A ( self : Tuple )-> Optional[int]:
__UpperCamelCase = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
__UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
__UpperCamelCase = emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
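# The expected constants in the test above come from the standard sinusoidal
# position-embedding formula. A NumPy sketch that reproduces them (layout
# assumed: the sin block followed by the cos block along the feature axis):
import numpy as np

def sinusoidal_embeddings(num_positions: int, dim: int) -> np.ndarray:
    pos = np.arange(num_positions)[:, None]        # (P, 1)
    i = np.arange(dim // 2)[None, :]               # (1, dim / 2)
    angles = pos / np.power(10000.0, 2 * i / dim)  # (P, dim / 2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

# row 0 is [0, 0, 0, 1, 1, 1]; row 1 starts 0.8415, 0.0464, 0.0022, ...
assert sinusoidal_embeddings(6, 6).shape == (6, 6)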
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : int = 1E-4
def A ( self : Optional[Any] )-> Any:
# query/key tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
__UpperCamelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
__UpperCamelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
__UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase = embed_positions([2, 16, 7_68] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
__UpperCamelCase = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance ) | 228 |
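# The rotary test above rotates query/key features pairwise by position-
# dependent angles. A NumPy sketch of the standard rotary formulation (the
# interleaved pair layout is an assumption; implementations differ):
import numpy as np

def apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # x: (..., dim) with dim even; sin, cos: (..., dim // 2)
    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin  # rotated first element of each pair
    out[..., 1::2] = x1 * sin + x2 * cos  # rotated second element of each pair
    return out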
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
_A = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = 'conditional_detr'
_snake_case : Any = ['past_key_values']
_snake_case : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[Any] , A_ : int=True , A_ : List[Any]=None , A_ : int=3 , A_ : Union[str, Any]=3_00 , A_ : int=6 , A_ : List[Any]=20_48 , A_ : str=8 , A_ : Dict=6 , A_ : str=20_48 , A_ : str=8 , A_ : str=0.0 , A_ : List[Any]=0.0 , A_ : Union[str, Any]=True , A_ : List[str]="relu" , A_ : Optional[Any]=2_56 , A_ : Optional[int]=0.1 , A_ : Tuple=0.0 , A_ : List[str]=0.0 , A_ : Any=0.02 , A_ : int=1.0 , A_ : Any=False , A_ : Tuple="sine" , A_ : int="resnet50" , A_ : Dict=True , A_ : List[str]=False , A_ : Optional[Any]=2 , A_ : List[Any]=5 , A_ : List[str]=2 , A_ : Union[str, Any]=1 , A_ : Dict=1 , A_ : str=2 , A_ : Any=5 , A_ : Optional[int]=2 , A_ : List[str]=0.25 , **A_ : Union[str, Any] , )-> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(A_ , A_ ):
__UpperCamelCase = backbone_config.get("model_type" )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(A_ )
__UpperCamelCase = use_timm_backbone
__UpperCamelCase = backbone_config
__UpperCamelCase = num_channels
__UpperCamelCase = num_queries
__UpperCamelCase = d_model
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = encoder_layers
__UpperCamelCase = auxiliary_loss
__UpperCamelCase = position_embedding_type
__UpperCamelCase = backbone
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = dilation
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = mask_loss_coefficient
__UpperCamelCase = dice_loss_coefficient
__UpperCamelCase = cls_loss_coefficient
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def A ( self : int )-> int:
return self.encoder_attention_heads
@property
def A ( self : List[Any] )-> int:
return self.d_model
def A ( self : List[Any] )-> Tuple:
__UpperCamelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__UpperCamelCase = self.backbone_config.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def A ( self : str )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
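# Note (assumption): these dynamic axes follow the usual vision OnnxConfig layout --
# `pixel_values` varies over batch, num_channels, height and width, while
# `pixel_mask` only varies over the batch dimension.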
@property
def A ( self : Optional[Any] )-> float:
return 1e-5
@property
def A ( self : List[Any] )-> int:
return 12
| 228 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Union[str, Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase_ : Dict = 2_5_0_0_0_4
UpperCAmelCase_ : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( _lowerCamelCase, unittest.TestCase ):
'''simple docstring'''
lowercase : str = MBartTokenizer
lowercase : Optional[Any] = MBartTokenizerFast
lowercase : Dict = True
lowercase : List[str] = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE =MBartTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =MBartTokenizer(_A , keep_accents=_A )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_SCREAMING_SNAKE_CASE =tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_SCREAMING_SNAKE_CASE =tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_SCREAMING_SNAKE_CASE =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_SCREAMING_SNAKE_CASE =self.rust_tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE =self.tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =tokenizer_r.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =tokenizer_p.save_pretrained(_A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_SCREAMING_SNAKE_CASE =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
_SCREAMING_SNAKE_CASE =tokenizer_r.from_pretrained(_A )
_SCREAMING_SNAKE_CASE =tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =tokenizer_r.save_pretrained(_A , legacy_format=_A )
_SCREAMING_SNAKE_CASE =tokenizer_p.save_pretrained(_A )
# Checks it save with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
_SCREAMING_SNAKE_CASE =tokenizer_r.from_pretrained(_A )
_SCREAMING_SNAKE_CASE =tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =tokenizer_r.save_pretrained(_A , legacy_format=_A )
_SCREAMING_SNAKE_CASE =tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_SCREAMING_SNAKE_CASE =tokenizer_r.from_pretrained(_A )
_SCREAMING_SNAKE_CASE =tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[Any] = "facebook/mbart-large-en-ro"
lowercase : List[str] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowercase : int = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowercase : Optional[Any] = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_SCREAMING_SNAKE_CASE =1
return cls
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertIn(_A , self.tokenizer.all_special_ids )
_SCREAMING_SNAKE_CASE =[RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_SCREAMING_SNAKE_CASE =self.tokenizer.decode(_A , skip_special_tokens=_A )
_SCREAMING_SNAKE_CASE =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , _A )
_SCREAMING_SNAKE_CASE =1_0
_SCREAMING_SNAKE_CASE =self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _A )
self.assertEqual(len(_A ) , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =MBartTokenizer.from_pretrained(_A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
_SCREAMING_SNAKE_CASE =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
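# Note (assumption): MBart formats a source sequence as [tokens, eos, src_lang_code],
# so the tokenizer keeps no prefix tokens and uses [eos_token_id, EN_CODE] as its
# suffix -- exactly what the two assertions above verify.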
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =self.tokenizer(
text_target=self.tgt_text , padding=_A , truncation=_A , max_length=1_0 , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =targets['''input_ids''']
_SCREAMING_SNAKE_CASE =shift_tokens_right(_A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_A ) , {
# A, test, EOS, en_XX
'''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
| 255 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Any = "gpt_neo"
lowercase : Optional[int] = ["past_key_values"]
lowercase : str = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , _A=5_0_2_5_7 , _A=2_0_4_8 , _A=2_0_4_8 , _A=2_4 , _A=[[["global", "local"], 1_2]] , _A=1_6 , _A=None , _A=2_5_6 , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_layers
_SCREAMING_SNAKE_CASE =num_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =window_size
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =resid_dropout
_SCREAMING_SNAKE_CASE =embed_dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =classifier_dropout
_SCREAMING_SNAKE_CASE =layer_norm_epsilon
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =use_cache
_SCREAMING_SNAKE_CASE =bos_token_id
_SCREAMING_SNAKE_CASE =eos_token_id
_SCREAMING_SNAKE_CASE =attention_types
_SCREAMING_SNAKE_CASE =self.expand_attention_types_params(_A )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
@staticmethod
def UpperCamelCase_ ( _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
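# Worked example (informal): with the default `attention_types=[[["global", "local"], 12]]`,
# the expansion above repeats the pair 12 times, yielding 24 entries
# ["global", "local", "global", "local", ...], one per layer, so the length check in
# __init__ passes for the default num_layers=24.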
def _lowerCAmelCase(a : int , a : Tuple , a : Union[str, Any] , a : Optional[Any] ) -> str:
import torch
_SCREAMING_SNAKE_CASE =input.size()
_SCREAMING_SNAKE_CASE =len(a )
_SCREAMING_SNAKE_CASE =shape[dimension]
_SCREAMING_SNAKE_CASE =torch.arange(0 , a , a )
_SCREAMING_SNAKE_CASE =torch.div(sizedim - size , a , rounding_mode='''floor''' ) + 1
_SCREAMING_SNAKE_CASE =torch.arange(a ) + low_indices[:min_length][:, None]
_SCREAMING_SNAKE_CASE =[slice(a )] * rank
_SCREAMING_SNAKE_CASE =indices
_SCREAMING_SNAKE_CASE =input[s]
_SCREAMING_SNAKE_CASE =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(a )
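# Note (assumption): this helper reproduces `torch.Tensor.unfold` -- it slices the
# input along `dimension` into windows of `size` taken every `step` elements and
# moves the window axis to the end; e.g. a (2, 10) tensor unfolded on dim 1 with
# size=4, step=2 yields shape (2, 4, 4): 4 windows of 4 elements each.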
def _lowerCAmelCase(a : Optional[Any] , a : Optional[int] ) -> List[str]:
import torch
_SCREAMING_SNAKE_CASE =torch.arange(1 , a )
_SCREAMING_SNAKE_CASE =torch.remainder(a , a )
_SCREAMING_SNAKE_CASE =remainders == 0
_SCREAMING_SNAKE_CASE =candidates[divisor_indices]
_SCREAMING_SNAKE_CASE =torch.max(a )
return largest_divisor, torch.div(a , a , rounding_mode='''floor''' )
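# Worked example (informal): for seq_length=12 and window_size=5 the candidates are
# 1..4, the divisors of 12 among them are {1, 2, 3, 4}, so the function returns
# block_length=4 and num_blocks=3.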
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='''inputs''' )
_SCREAMING_SNAKE_CASE ={0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_SCREAMING_SNAKE_CASE ={0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._config.num_heads
def UpperCamelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
_SCREAMING_SNAKE_CASE =OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_SCREAMING_SNAKE_CASE =seqlen + 2
_SCREAMING_SNAKE_CASE =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_SCREAMING_SNAKE_CASE =[
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
_SCREAMING_SNAKE_CASE =common_inputs['''attention_mask''']
if self.use_past:
_SCREAMING_SNAKE_CASE =ordered_inputs['''attention_mask'''].dtype
_SCREAMING_SNAKE_CASE =torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1_3
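# Note (assumption): this property presumably pins the default ONNX opset to 13,
# i.e. the minimum opset the exported graph's operators require.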
| 255 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_UpperCamelCase: Optional[int] ='tiny-wmt19-en-ru'
# Build
# borrowed from a test
_UpperCamelCase: Tuple =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_UpperCamelCase: List[Any] =dict(zip(vocab, range(len(vocab))))
_UpperCamelCase: List[str] =['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase: Optional[Any] =Path(tmpdirname)
_UpperCamelCase: Optional[int] =build_dir / VOCAB_FILES_NAMES['src_vocab_file']
_UpperCamelCase: List[Any] =build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
_UpperCamelCase: Optional[int] =build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
_UpperCamelCase: Optional[Any] =FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
_UpperCamelCase: Tuple =FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_UpperCamelCase: Dict =FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
_UpperCamelCase: Optional[Any] =tokenizer(['Making tiny model'], return_tensors='pt')
_UpperCamelCase: Optional[int] =tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
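# Hedged usage sketch (assumes the upload above succeeded; the repo id mirrors the
# "stas/tiny-wmt19-en-ru" target mentioned at the top of this script):
#
# from transformers import FSMTForConditionalGeneration, FSMTTokenizer
# tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
# out = model.generate(**tok(["a tiny smoke test"], return_tensors="pt"), max_new_tokens=4)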
| 585 |
from math import isqrt
def _a ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(__SCREAMING_SNAKE_CASE ) + 1 ) )
def _a ( __SCREAMING_SNAKE_CASE : int = 10**6 ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 7
while prime_candidate < max_prime:
primes_count += is_prime(__SCREAMING_SNAKE_CASE )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
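# Note (assumption): the candidates enumerated above are the differences of
# consecutive cubes, (k + 1)**3 - k**3 == 3*k*k + 3*k + 1, generated incrementally:
# 7, then 7 + 12 = 19, then 19 + 18 = 37, ... since each step adds 6 * cube_index.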
if __name__ == "__main__":
print(F"{solution() = }")
| 585 | 1 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCamelCase__ : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig):
__a : Optional[datasets.Features] = None
def UpperCamelCase ( _lowerCAmelCase : "pyspark.sql.DataFrame", _lowerCAmelCase : List[int], ) -> Dict:
import pyspark
def generate_fn():
_UpperCAmelCase : List[str] = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_UpperCAmelCase : Dict = df_with_partition_id.select("""*""" ).where(f'''part_id = {partition_id}''' ).drop("""part_id""" )
_UpperCAmelCase : Any = partition_df.collect()
_UpperCAmelCase : str = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
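# Example (informal): with partition_order=[2, 0] the generator walks partition 2
# first, then partition 0, yielding ("2_0", row_dict), ("2_1", row_dict), ...,
# ("0_0", row_dict), ... -- keys are "{partition_id}_{row_id}" and row_id restarts
# at 0 inside each partition.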
class _UpperCAmelCase ( _BaseExamplesIterable):
def __init__( self , _A , _A=None , ) -> str:
'''simple docstring'''
_UpperCAmelCase : int = df
_UpperCAmelCase : int = partition_order or range(self.df.rdd.getNumPartitions() )
_UpperCAmelCase : List[str] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Dict:
'''simple docstring'''
yield from self.generate_examples_fn()
def __snake_case ( self , _A ) -> "SparkExamplesIterable":
'''simple docstring'''
_UpperCAmelCase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_A )
return SparkExamplesIterable(self.df , partition_order=_A )
def __snake_case ( self , _A , _A ) -> "SparkExamplesIterable":
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.split_shard_indices_by_worker(_A , _A )
return SparkExamplesIterable(self.df , partition_order=_A )
@property
def __snake_case ( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _UpperCAmelCase ( datasets.DatasetBuilder):
__a : Tuple = SparkConfig
def __init__( self , _A , _A = None , _A = None , **_A , ) -> int:
'''simple docstring'''
import pyspark
_UpperCAmelCase : Optional[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_UpperCAmelCase : str = df
_UpperCAmelCase : Optional[Any] = working_dir
super().__init__(
cache_dir=_A , config_name=str(self.df.semanticHash() ) , **_A , )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
def create_cache_and_write_probe(_A ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_A )
_UpperCAmelCase : Any = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_A , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_UpperCAmelCase : int = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def __snake_case ( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self , _A ) -> Any:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __snake_case ( self , _A ) -> Optional[int]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(_A ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_UpperCAmelCase : Union[str, Any] = self.df.count()
_UpperCAmelCase : List[Any] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_UpperCAmelCase : Any = (
self.df.limit(_A )
.repartition(1 )
.mapInArrow(_A , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_UpperCAmelCase : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_UpperCAmelCase : List[Any] = min(_A , int(approx_total_size / max_shard_size ) )
_UpperCAmelCase : int = self.df.repartition(_A )
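# Worked example (informal): if the 100-row sample averages ~1 KB per row in Arrow
# format and the DataFrame holds 1e9 rows (~1 TB estimated), a 500 MB shard cap
# repartitions to about 1e12 / 5e8 = 2000 partitions, capped by the row count so
# every partition keeps at least one row.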
def __snake_case ( self , _A , _A , _A , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
_UpperCAmelCase : str = ParquetWriter if file_format == """parquet""" else ArrowWriter
_UpperCAmelCase : Any = os.path.join(self._working_dir , os.path.basename(_A ) ) if self._working_dir else fpath
_UpperCAmelCase : str = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_UpperCAmelCase : List[Any] = self.config.features
_UpperCAmelCase : Tuple = self._writer_batch_size
_UpperCAmelCase : Any = self._fs.storage_options
def write_arrow(_A ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_UpperCAmelCase : Optional[int] = pyspark.TaskContext().taskAttemptId()
_UpperCAmelCase : Tuple = next(_A , _A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Union[str, Any] = writer_class(
features=_A , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
_UpperCAmelCase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(_A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_UpperCAmelCase , _UpperCAmelCase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
_UpperCAmelCase : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
_UpperCAmelCase : Dict = pa.Table.from_batches([batch] )
writer.write_table(_A )
if writer._num_bytes > 0:
_UpperCAmelCase , _UpperCAmelCase : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_A ) ):
_UpperCAmelCase : Union[str, Any] = os.path.join(os.path.dirname(_A ) , os.path.basename(_A ) )
shutil.move(_A , _A )
_UpperCAmelCase : Optional[Any] = (
self.df.mapInArrow(_A , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __snake_case ( self , _A , _A = "arrow" , _A = None , _A = None , **_A , ) -> Any:
'''simple docstring'''
self._validate_cache_dir()
_UpperCAmelCase : Optional[int] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_A )
_UpperCAmelCase : Optional[int] = not is_remote_filesystem(self._fs )
_UpperCAmelCase : Union[str, Any] = os.path.join if is_local else posixpath.join
_UpperCAmelCase : Optional[Any] = """-TTTTT-SSSSS-of-NNNNN"""
_UpperCAmelCase : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_UpperCAmelCase : List[Any] = path_join(self._output_dir , _A )
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Any = 0
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Dict = []
for task_id, content in self._prepare_split_single(_A , _A , _A ):
num_examples , num_bytes , num_shards , shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_A )
_UpperCAmelCase : Any = total_num_examples
_UpperCAmelCase : List[str] = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_UpperCAmelCase : List[str] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_UpperCAmelCase : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_A , _A , _A , ):
rename(
_A , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[str] = 0
for i in range(len(_A ) ):
_UpperCAmelCase , _UpperCAmelCase : Any = task_id_and_num_shards[i]
for shard_id in range(_A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _A : _rename_shard(*_A ) ).collect()
else:
# don't use any pattern
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(_A , """""" ) , )
def __snake_case ( self , _A , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 238 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 238 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] =logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class A_ ( UpperCamelCase_ ):
def __init__( self : Union[str, Any] , snake_case__ : int = 1_01 ):
lowercase = length
def __len__( self : Dict ):
return self.length
def __getitem__( self : int , snake_case__ : str ):
return i
class A_ :
def __call__( self : Dict , snake_case__ : List[Any] ):
return {"input_ids": torch.tensor(UpperCamelCase__ ), "labels": torch.tensor(UpperCamelCase__ )}
class A_ ( nn.Module ):
def __init__( self : List[str] ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
lowercase = nn.Linear(1_20 , 80 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Any=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class A_ ( UpperCamelCase_ ):
@require_torch_neuroncore
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = F"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
lowercase = self.get_auto_remove_tmp_dir()
lowercase = F"""--output_dir {output_dir}""".split()
lowercase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class A_ ( UpperCamelCase_ ):
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowercase = F"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
lowercase = self.get_auto_remove_tmp_dir()
lowercase = F"""--output_dir {output_dir}""".split()
lowercase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__SCREAMING_SNAKE_CASE : Any =HfArgumentParser((TrainingArguments,))
__SCREAMING_SNAKE_CASE : Tuple =parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__SCREAMING_SNAKE_CASE : List[str] =DummyDataset(dataset_length)
def UpperCamelCase__ ( lowerCAmelCase__ ):
lowercase = list(range(len(_lowercase ) ) )
lowercase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
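# Note: compute_metrics above flags success only when the gathered predictions and
# labels come back as the exact sequence list(range(dataset_length)) -- i.e. every
# sample is returned once and in order, even for lengths (101, 40, 7) that do not
# divide evenly across processes.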
__SCREAMING_SNAKE_CASE : Any =Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__SCREAMING_SNAKE_CASE : str =trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__SCREAMING_SNAKE_CASE : Tuple =trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__SCREAMING_SNAKE_CASE : Any =2
__SCREAMING_SNAKE_CASE : Any =trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__SCREAMING_SNAKE_CASE : Optional[int] =trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__SCREAMING_SNAKE_CASE : List[str] =None
| 712 |
import argparse
import os
import re
import packaging.version
__SCREAMING_SNAKE_CASE : Optional[int] ='''examples/'''
__SCREAMING_SNAKE_CASE : Any ={
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__SCREAMING_SNAKE_CASE : Union[str, Any] ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__SCREAMING_SNAKE_CASE : Any ='''README.md'''
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
lowercase = f.read()
lowercase , lowercase = REPLACE_PATTERNS[pattern]
lowercase = replace.replace("""VERSION""" ,lowerCAmelCase__ )
lowercase = re_pattern.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.write(lowerCAmelCase__ )
def UpperCamelCase__ ( lowerCAmelCase__ ):
for folder, directories, fnames in os.walk(lowerCAmelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ,pattern="""examples""" )
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
if not patch:
update_version_in_examples(lowerCAmelCase__ )
def UpperCamelCase__ ( ):
lowercase = """🤗 Transformers currently provides the following architectures"""
lowercase = """1. Want to contribute a new model?"""
with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
lowercase = f.readlines()
# Find the start of the list.
lowercase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowercase = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" ,"""https://huggingface.co/docs/transformers/model_doc""" ,)
index += 1
with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.writelines(lowerCAmelCase__ )
def UpperCamelCase__ ( ):
with open(REPLACE_FILES["""init"""] ,"""r""" ) as f:
lowercase = f.read()
lowercase = REPLACE_PATTERNS["""init"""][0].search(lowerCAmelCase__ ).groups()[0]
return packaging.version.parse(lowerCAmelCase__ )
def UpperCamelCase__ ( lowerCAmelCase__=False ):
lowercase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowercase = default_version.base_version
elif patch:
lowercase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowercase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowercase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(lowerCAmelCase__ ) == 0:
lowercase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(lowerCAmelCase__ ,patch=lowerCAmelCase__ )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
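# Worked example (informal): if src/transformers/__init__.py holds
# __version__ = "4.30.0.dev0", the pre-release step proposes "4.30.0"; a patch
# release on "4.29.1" proposes "4.29.2"; the post-release step below then moves
# the dev version to "4.31.0.dev0".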
def UpperCamelCase__ ( ):
lowercase = get_version()
lowercase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowercase = current_version.base_version
# Check with the user we got that right.
lowercase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(lowerCAmelCase__ ) == 0:
lowercase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(lowerCAmelCase__ )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__SCREAMING_SNAKE_CASE : Optional[int] =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 72 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """AutoImageProcessor"""
_lowercase = """AutoTokenizer"""
def __init__( self: int,A_: Any,A_: Tuple ):
'''simple docstring'''
super().__init__(A_,A_ )
__UpperCamelCase = self.image_processor
def __call__( self: str,A_: Union[str, Any]=None,A_: Optional[int]=None,A_: Optional[int]=None,**A_: Union[str, Any] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ )
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Dict,*A_: Optional[Any],**A_: Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Union[str, Any],**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_UpperCAmelCase : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Dict=1.0 , __snake_case : List[Any]=None , __snake_case : List[str]=None ):
if rng is None:
_A = global_rng
_A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple, UpperCamelCase__ : Tuple, UpperCamelCase__ : Union[str, Any]=7, UpperCamelCase__ : List[str]=4_00, UpperCamelCase__ : Any=20_00, UpperCamelCase__ : int=10, UpperCamelCase__ : Tuple=1_60, UpperCamelCase__ : Tuple=8, UpperCamelCase__ : List[str]=0.0, UpperCamelCase__ : Optional[Any]=40_00, UpperCamelCase__ : List[str]=False, UpperCamelCase__ : List[Any]=True, ) -> Dict:
_A = parent
_A = batch_size
_A = min_seq_length
_A = max_seq_length
_A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A = padding_value
_A = sampling_rate
_A = return_attention_mask
_A = do_normalize
_A = feature_size
_A = chunk_length
_A = hop_length
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : List[Any]=False, UpperCamelCase__ : str=False ) -> Any:
def _flatten(UpperCamelCase__ : List[str] ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
_A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
_A = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = WhisperFeatureExtractor if is_speech_available() else None
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
_A = WhisperFeatureExtractionTester(self )
def __UpperCAmelCase ( self : List[Any] ) -> int:
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
_A = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = feat_extract_first.mel_filters
_A = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase__, UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(UpperCamelCase__, 'feat_extract.json' )
feat_extract_first.to_json_file(UpperCamelCase__ )
_A = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = feat_extract_first.mel_filters
_A = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase__, UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )]
_A = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_A = feature_extractor(UpperCamelCase__, padding='max_length', return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A = feature_extractor(speech_inputs[0], return_tensors='np' ).input_features
_A = feature_extractor(np_speech_inputs[0], return_tensors='np' ).input_features
self.assertTrue(np.allclose(UpperCamelCase__, UpperCamelCase__, atol=1e-3 ) )
# Test batched
_A = feature_extractor(UpperCamelCase__, return_tensors='np' ).input_features
_A = feature_extractor(UpperCamelCase__, return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__, UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__, UpperCamelCase__, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_A = np.asarray(UpperCamelCase__ )
_A = feature_extractor(UpperCamelCase__, return_tensors='np' ).input_features
_A = feature_extractor(UpperCamelCase__, return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__, UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__, UpperCamelCase__, atol=1e-3 ) )
# Test truncation required
_A = [floats_list((1, x) )[0] for x in range(2_00, (feature_extractor.n_samples + 5_00), 2_00 )]
_A = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
_A = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs_truncated]
_A = feature_extractor(UpperCamelCase__, return_tensors='np' ).input_features
_A = feature_extractor(UpperCamelCase__, return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__, UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__, UpperCamelCase__, atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ) -> Any:
import torch
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = np.random.rand(1_00, 32 ).astype(np.float32 )
_A = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A = feature_extractor.pad([{'input_features': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
_A = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __UpperCAmelCase ( self : int, UpperCamelCase__ : int ) -> Optional[Any]:
_A = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation' )
# automatic decoding with librispeech
_A = ds.sort('id' ).select(range(UpperCamelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
# fmt: off
_A = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
_A = self._load_datasamples(1 )
_A = WhisperFeatureExtractor()
_A = feature_extractor(UpperCamelCase__, return_tensors='pt' ).input_features
self.assertEqual(input_features.shape, (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30], UpperCamelCase__, atol=1e-4 ) )
def __UpperCAmelCase ( self : int ) -> Any:
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = self._load_datasamples(1 )[0]
_A = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
_A = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=UpperCamelCase__ )[0]
self.assertTrue(np.all(np.mean(UpperCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase__ ) - 1 ) < 1e-3 ) )
| 107 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __a ):
def __init__( self : Any , a__ : int=None , **a__ : List[str] ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , a__ , )
super().__init__(args=a__ , **a__ )
| 715 |
'''simple docstring'''
from typing import List
import numpy as np
def UpperCamelCase ( a ) -> int:
'''simple docstring'''
__magic_name__ = {key: len(a ) for key, value in gen_kwargs.items() if isinstance(a , a )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
__magic_name__ = max(lists_lengths.values() , default=0 )
return max(1 , a )
def UpperCamelCase ( a , a ) -> List[range]:
'''simple docstring'''
__magic_name__ = []
for group_idx in range(a ):
__magic_name__ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__magic_name__ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__magic_name__ = range(a , start + num_shards_to_add )
shards_indices_per_group.append(a )
return shards_indices_per_group
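# Worked example: distributing 10 shards over at most 3 jobs gives
# [range(0, 4), range(4, 7), range(7, 10)] -- early groups absorb the remainder,
# and a group is dropped entirely once num_shards_to_add reaches 0.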
def UpperCamelCase ( a , a ) -> List[dict]:
'''simple docstring'''
__magic_name__ = _number_of_shards_in_gen_kwargs(a )
if num_shards == 1:
return [dict(a )]
else:
__magic_name__ = _distribute_shards(num_shards=a , max_num_jobs=a )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(a , a )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(a ) )
]
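# Worked example (informal): gen_kwargs={"files": [f0, f1, f2, f3], "rows": ("a", "b")}
# split over max_num_jobs=2 becomes
# [{"files": [f0, f1], "rows": ("a", "b")}, {"files": [f2, f3], "rows": ("a", "b")}] --
# lists are sharded across jobs, tuples and scalars are copied to every job.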
def UpperCamelCase ( a ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , a )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def UpperCamelCase ( a , a ) -> dict:
'''simple docstring'''
__magic_name__ = {len(a ) for value in gen_kwargs.values() if isinstance(a , a )}
__magic_name__ = {}
for size in list_sizes:
__magic_name__ = list(range(a ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__magic_name__ = dict(a )
for key, value in shuffled_kwargs.items():
if isinstance(a , a ):
__magic_name__ = [value[i] for i in indices_per_size[len(a )]]
return shuffled_kwargs
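# Note: only list-valued entries are permuted, with one shared permutation per list
# length, so equally sized lists stay aligned with each other; tuples and scalars
# pass through untouched.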
| 245 | 0 |