'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    # Neville's iterated interpolation: evaluate at x0 the polynomial through the
    # given points. q[j][i] holds the value at x0 of the polynomial through
    # points j - i + 1, ..., j.
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
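
# A minimal usage sketch (the sample data is illustrative, not from the
# original file): the points below lie on the line y = x + 5, so interpolating
# at x0 = 5 should return the exact value 10.
#
#   >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
#   10.0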
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Map the original checkpoint's keys onto the diffusers model's keys by position.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # the value-function checkpoint is saved as a plain state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Speed of sound c = sqrt(K / rho), with bulk modulus K in Pa and
    # density rho in kg/m^3; the result is in m/s.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
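
# Rough sanity check (the values are textbook approximations, not from the
# original file): water at room temperature has a bulk modulus near 2.15e9 Pa
# and a density near 998 kg/m^3, giving a speed of sound around 1.47 km/s.
#
#   >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))
#   1468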
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model needs to know which id is the unknown token;
        # patch it into the serialized tokenizer.
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
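
# Quick sanity checks (illustrative, not from the original file):
#
#   >>> is_prime(29)
#   True
#   >>> is_prime(33)   # 3 * 11
#   False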
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composites that cannot be written as
    the sum of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the first odd composite that violates the conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def solution() -> int:
    """
    Return the product a * b * c of the Pythagorean triplet
    (a, b, c) with a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
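
# Illustrative check (not part of the original file): the even Fibonacci
# numbers not exceeding 100 are 2, 8 and 34, so:
#
#   >>> solution(100)
#   44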
if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece with fairseq-compatible special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Heron's formula: compute the area from the three side lengths."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError("area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
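
# Illustrative example (the key below is hypothetical, not from the original
# file): `rename_key` turns PyTorch-style indexed module names into Flax-style
# underscore names wherever a `.digit` segment follows a word:
#
#   >>> rename_key("down_blocks.0.resnets.1.conv1.weight")
#   'down_blocks_0.resnets_1.conv1.weight'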
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register a Formatter object using a name and optional aliases.

    This function must be used on a Formatter class.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from pyarrow tables.
    It defines the formatting for rows, columns and batches.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
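
# Illustrative check (not part of the original file): 145 is a fixed point of
# the digit-factorial map, since 1! + 4! + 5! = 1 + 24 + 120 = 145.
#
#   >>> digit_factorial_sum(145)
#   145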
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count the starting numbers below number_limit whose chain has exactly chain_length non-repeating elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = GPTNeoXForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = GPTNeoXForTokenClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = True
lowerCamelCase__ = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,use_cache=lowerCAmelCase__ )
lowerCamelCase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowerCamelCase__ = torch.cat([input_mask, next_mask] ,dim=-1 )
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,output_hidden_states=lowerCAmelCase__ )
lowerCamelCase__ = output_from_no_past["hidden_states"][0]
lowerCamelCase__ = model(
lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,past_key_values=lowerCAmelCase__ ,output_hidden_states=lowerCAmelCase__ ,)["hidden_states"][0]
# select random slice
lowerCamelCase__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-3 ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_UpperCamelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_UpperCamelCase = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTNeoXModelTester(self )
        lowerCamelCase__ = ConfigTester(self ,config_class=GPTNeoXConfig ,hidden_size=64 ,num_attention_heads=8 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
# This regression test was failing with PyTorch < 1.3
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def UpperCamelCase_ ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = ids_tensor([1, 10] ,config.vocab_size )
lowerCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ = GPTNeoXModel(lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
original_model.eval()
lowerCamelCase__ = original_model(lowerCAmelCase__ ).last_hidden_state
lowerCamelCase__ = original_model(lowerCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ = {"type": scaling_type, "factor": 10.0}
lowerCamelCase__ = GPTNeoXModel(lowerCAmelCase__ )
scaled_model.to(lowerCAmelCase__ )
scaled_model.eval()
lowerCamelCase__ = scaled_model(lowerCAmelCase__ ).last_hidden_state
lowerCamelCase__ = scaled_model(lowerCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-5 ) )
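    # Usage sketch (illustrative, not part of the test): RoPE scaling is driven
    # by the `rope_scaling` dict on the config before the model is built, e.g.
    #   config.rope_scaling = {"type": "dynamic", "factor": 2.0}
    #   model = GPTNeoXForCausalLM(config)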
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
lowerCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCAmelCase__ )
lowerCamelCase__ = tokenizer("""My favorite food is""" ,return_tensors="""pt""" ).to(lowerCAmelCase__ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
lowerCamelCase__ = model.generate(**lowerCAmelCase__ ,do_sample=lowerCAmelCase__ ,max_new_tokens=20 )
lowerCamelCase__ = tokenizer.batch_decode(lowerCAmelCase__ )[0]
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
| 708 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
_UpperCamelCase = GPTSwaTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        lowerCamelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """This is a test"""
lowerCamelCase__ = """This is a test"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """<s>"""
lowerCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,20_00 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )
def UpperCamelCase_ ( self ):
        lowerCamelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
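        # Note: the <0xC3> <0xA9> pieces above are SentencePiece byte fallback for
        # "é" (UTF-8 bytes C3 A9); characters missing from the vocab decompose
        # into raw byte tokens instead of mapping to <unk>.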
def UpperCamelCase_ ( self ):
        lowerCamelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowerCamelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
| 9 | 0 |
'''simple docstring'''
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
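# Quick usage sketch (values rounded): positives pass through, negatives
# saturate toward -alpha:
#   exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), alpha=1.0)
#   -> approximately [-0.8647, 0.0, 3.0]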
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (Scene ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
| 9 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class CamembertConfig(PretrainedConfig):
'''simple docstring'''
_UpperCamelCase = """camembert"""
def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,_lowerCAmelCase=2 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = use_cache
lowerCamelCase__ = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
if self.task == "multiple-choice":
lowerCamelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 710 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
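# Two sample chains from the problem statement:
#   44 -> 32 -> 13 -> 10 -> 1 -> 1 -> ...
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 -> ...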
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class UpperCamelCase__ (unittest.TestCase ):
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
lowerCamelCase__ = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_28,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_42,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(UpperCamelCase__ ) ,UpperCamelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) ,x.transpose() ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ,axes=(1, 2, 0) ) ,x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) ,transpose(UpperCamelCase__ ).numpy() ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ,axes=(1, 2, 0) ) ,transpose(UpperCamelCase__ ,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) ,transpose(UpperCamelCase__ ).numpy() ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ,axes=(1, 2, 0) ) ,transpose(UpperCamelCase__ ,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) ,np.asarray(transpose(UpperCamelCase__ ) ) ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ,axes=(1, 2, 0) ) ,np.asarray(transpose(UpperCamelCase__ ,axes=(1, 2, 0) ) ) ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(4, 3) ) ,np.reshape(UpperCamelCase__ ,(4, 3) ) ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(12, 5) ) ,np.reshape(UpperCamelCase__ ,(12, 5) ) ) )
@require_torch
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(4, 3) ) ,reshape(UpperCamelCase__ ,(4, 3) ).numpy() ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(12, 5) ) ,reshape(UpperCamelCase__ ,(12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(4, 3) ) ,reshape(UpperCamelCase__ ,(4, 3) ).numpy() ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(12, 5) ) ,reshape(UpperCamelCase__ ,(12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(4, 3) ) ,np.asarray(reshape(UpperCamelCase__ ,(4, 3) ) ) ) )
lowerCamelCase__ = np.random.randn(3 ,4 ,5 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ ,(12, 5) ) ,np.asarray(reshape(UpperCamelCase__ ,(12, 5) ) ) ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(1 ,3 ,4 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) ,np.squeeze(UpperCamelCase__ ) ) )
lowerCamelCase__ = np.random.randn(1 ,4 ,1 ,5 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ,axis=2 ) ,np.squeeze(UpperCamelCase__ ,axis=2 ) ) )
@require_torch
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(1 ,3 ,4 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) ,squeeze(UpperCamelCase__ ).numpy() ) )
lowerCamelCase__ = np.random.randn(1 ,4 ,1 ,5 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ,axis=2 ) ,squeeze(UpperCamelCase__ ,axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(1 ,3 ,4 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) ,squeeze(UpperCamelCase__ ).numpy() ) )
lowerCamelCase__ = np.random.randn(1 ,4 ,1 ,5 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ,axis=2 ) ,squeeze(UpperCamelCase__ ,axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(1 ,3 ,4 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) ,np.asarray(squeeze(UpperCamelCase__ ) ) ) )
lowerCamelCase__ = np.random.randn(1 ,4 ,1 ,5 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ,axis=2 ) ,np.asarray(squeeze(UpperCamelCase__ ,axis=2 ) ) ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ ,axis=1 ) ,np.expand_dims(UpperCamelCase__ ,axis=1 ) ) )
@require_torch
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ ,axis=1 ) ,expand_dims(UpperCamelCase__ ,axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ ,axis=1 ) ,expand_dims(UpperCamelCase__ ,axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.random.randn(3 ,4 )
lowerCamelCase__ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ ,axis=1 ) ,np.asarray(expand_dims(UpperCamelCase__ ,axis=1 ) ) ) )
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
    'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
'''simple docstring'''
_UpperCamelCase = 'donut-swin'
_UpperCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(_lowerCAmelCase )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
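        # With the defaults above: embed_dim=96, len(depths)=4 -> hidden_size = 96 * 2**3 = 768.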
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    # The longest side must be strictly shorter than the sum of the remaining sides.
    return copy_nums[-1] < sum(copy_nums[:-1])
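# For example, side lengths [6, 10, 5] can close into a polygon (10 < 6 + 5),
# while [3, 7, 13, 2] cannot (13 >= 3 + 7 + 2).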
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
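# Effect on the checkpoint, schematically:
#   before: {"lm_head.decoder.weight": W, ...}
#   after:  {"lm_head.weight": W, ...}
# i.e. the fine-tuned head weight is moved to the key that `transformers`
# checkpoints expect.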
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
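# For comparison, a minimal recursive sketch of the same traversal (the function
# name is ours; the visiting order can differ from the stack-based version):
def depth_first_search_recursive(graph: dict, start: str, explored: set | None = None) -> set:
    if explored is None:
        explored = set()
    explored.add(start)
    for adj in graph[start]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored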
| 713 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = mask_ratio
lowerCamelCase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
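        # With the defaults above: (30 // 2) ** 2 = 225 patches, and
        # ceil((1 - 0.6) * 226) = 91 kept tokens including [CLS].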
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
# expected sequence length = num_patches
lowerCamelCase__ = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
lowerCamelCase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModelTester(self )
        lowerCamelCase__ = ConfigTester(self ,config_class=ViTMAEConfig ,has_text_modality=False ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = outputs_dict[0].numpy()
lowerCamelCase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCAmelCase ):
lowerCamelCase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCAmelCase ):
lowerCamelCase__ = v.numpy()
else:
lowerCamelCase__ = np.array(_lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.constant(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ = tf_noise
super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCAmelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase )
}
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ = main_layer_class(_lowerCAmelCase )
lowerCamelCase__ = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) )
lowerCamelCase__ = model(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" )
model.save(_lowerCAmelCase )
lowerCamelCase__ = tf.keras.models.load_model(
_lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCAmelCase ,tf.keras.Model )
lowerCamelCase__ = model(_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = outputs.last_hidden_state.numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = outputs.logits.numpy()
lowerCamelCase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase )
lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = after_outputs["""logits"""].numpy()
lowerCamelCase__ = 0
lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase ,1E-5 )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_lowerCAmelCase )
lowerCamelCase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ = model_class.from_config(model.config )
lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCamelCase_ ( self ):
pass
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ = ViTMAEConfig()
lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
# verify the logits
lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCamelCase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
| 9 | 0 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
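# The truncated series being evaluated, for reference:
#   sin(theta) ~ sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r+1) / (2r+1)!
#   cos(theta) ~ sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r)   / (2r)!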
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = do_center_crop
lowerCamelCase__ = crop_size
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean
lowerCamelCase__ = image_std
def UpperCamelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
_UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
lowerCamelCase__ = LevitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self ):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
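# Note: prepare_image_inputs (from test_image_processing_common) generates
# random images sized between min_resolution and max_resolution, so
# equal_resolution=False exercises Levit's shortest-edge resize followed by a
# center crop on inputs of varying shapes.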
| 9 | 0 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-indexed line of the base,exponent pair in ``data_file``
    whose value exponent * log10(base) is greatest."""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
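# The input file holds one "base,exponent" pair per line; comparing
# x * log10(a) ranks the values without ever computing the astronomically
# large powers themselves, since log10(a**x) == x * log10(a).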
| 715 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Error for a single example: hypothesis value minus actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis h(x) = theta_0 + theta_1 * x_1 + ... + theta_n * x_n."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 9 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments extended with sequence-to-sequence specific options."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 716 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 9 | 0 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Discount a series of yearly cash flows back to their present value."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
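    # A minimal usage sketch (the values are illustrative, not from the
    # original file): a 5% discount rate applied to an up-front outlay
    # followed by two yearly inflows.
    print(present_value(0.05, [-100.0, 60.0, 70.0]))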
| 717 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR the ciphertext against the cycled key; return the plaintext if
    every decoded character is printable, otherwise None."""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
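# Decryption relies on XOR being an involution: (p ^ k) ^ k == p for any byte
# p and key byte k, so re-applying each candidate three-letter key to the
# ciphertext directly yields candidate plaintexts to filter.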
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
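# patch_submodule temporarily swaps an attribute reachable from a module (for
# example "os.path.join" as seen from _test_patching) for a mock object and
# restores the original on exit; the tests below exercise plain attributes,
# builtins, missing modules, nested patches, and the start()/stop() interface.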
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowerCamelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , lowercase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # patching a module that _test_patching does not import must not fail
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowerCamelCase__ = """__test_patch_submodule_successive_join__"""
lowerCamelCase__ = """__test_patch_submodule_successive_dirname__"""
lowerCamelCase__ = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , lowercase__ ):
with patch_submodule(_test_patching , """os.rename""" , lowercase__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowercase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , lowercase__ ):
with patch_submodule(_test_patching , """os.path.join""" , lowercase__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowercase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 digest of ``data`` (a bytes object)."""

    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
return data + padding + big_endian_integer
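    # The padding above follows the SHA-2 specification: a single 0x80 byte,
    # zero bytes until the running length is 56 mod 64, then the original
    # message length in bits as a big-endian 64-bit integer, so the padded
    # message splits evenly into 64-byte blocks.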
    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value, rotations):
        # right-rotate a 32-bit unsigned integer by ``rotations`` bits
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 9 | 0 |
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M) * N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting
        # all bits to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
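# The memoised recursion fills a (2^M) x (N + 1) table for M persons and N
# tasks, so counting every valid assignment costs O(N * 2^M) time and space.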
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 719 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
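# Example invocation (paths and script name are hypothetical):
#   python convert_m2m100_checkpoint.py /path/to/fairseq/model.pt ./m2m100-converted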
| 9 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the module imports without PIL installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
            nested_simplify(output),
            [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] ,)
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] ,)
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
            nested_simplify(output),
            [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 ,)
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] ,)
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
            nested_simplify(output),
            [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 ,)
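# With the tiny random CLIP checkpoint the candidate labels get near-uniform
# scores (~1/3 each), which is why the fast tests pin only the scores via
# ANY(str) labels: the ordering may differ across framework and versions.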
| 720 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 9 | 0 |
'''simple docstring'''
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow startup logs
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 721 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def A__ ( __lowerCAmelCase : Union[str, Any] ):
if hor == 128:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 64, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase__ = model
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 9 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
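# Minimal usage sketch: DeiTConfig() reproduces the base-size defaults above
# (hidden_size 768, 12 layers, 12 heads, 224x224 inputs with 16x16 patches);
# pass overrides as keyword arguments, e.g. DeiTConfig(image_size=384).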
| 700 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer built on the ``tokenizers`` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
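# Minimal usage sketch (the corpus path is hypothetical):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train("my_corpus.txt", vocab_size=8000)
#   tokenizer.save("tokenizer.json")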
| 9 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
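# Example invocation (paths are hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#     --pytorch_dump_path ./pytorch_model.bin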
| 701 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composite numbers that cannot be written as
    the sum of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]
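# Goldbach's other conjecture claims every odd composite is a prime plus twice
# a square; the while/else above falls through to the else branch only when no
# decomposition exists, and the smallest such counterexample is 5777.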
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCamelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
lowerCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
lowercase__ ,torch_dtype=torch.float16 ,)
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
lowerCamelCase__ = '''a cat sitting on a park bench'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=lowercase__ ,image=lowercase__ ,generator=lowercase__ ,output_type="""np""" ,)
lowerCamelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase_ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
lowercase__ ,torch_dtype=torch.float16 ,)
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ = '''a cat sitting on a park bench'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=lowercase__ ,image=lowercase__ ,generator=lowercase__ ,num_inference_steps=5 ,output_type="""np""" ,)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
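# For reference, a minimal standalone sketch of the pipeline exercised above.
# This is an illustrative example, not part of the test suite: it assumes
# network access to the stabilityai/stable-diffusion-x4-upscaler checkpoint,
# a CUDA device, and a local low-resolution image file.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

upscaler = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = load_image("low_res_cat.png").resize((128, 128))
# noise_level controls how much noise is added to the low-res conditioning image
# (it must stay below the pipeline's max_noise_level)
upscaled = upscaler(prompt="a cat", image=low_res, noise_level=20).images[0]
upscaled.save("upscaled_cat.png")  # 4x the input resolution, 512x512 here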
| 702 |
'''simple docstring'''
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
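# The brute force above checks O(n^2) pairs. Substituting c = 1000 - a - b into
# a^2 + b^2 = c^2 gives b = (1000^2 - 2000a) / (2000 - 2a), so a single loop
# over a suffices. This closed-form variant is an added illustration, not part
# of the original solution.
def solution_closed_form() -> int:
    for a in range(1, 999):
        numerator = 1000 * 1000 - 2000 * a
        denominator = 2000 - 2 * a
        if numerator % denominator == 0:
            b = numerator // denominator
            c = 1000 - a - b
            if 0 < a < b < c:
                return a * b * c  # 31875000 for (a, b, c) = (200, 375, 425)
    return -1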
| 9 | 0 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion point of val within the sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the tail of the sorted prefix right and insert val
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
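# For comparison, the standard library performs the same binary search for the
# insertion point via bisect.insort; this sketch is an added illustration and
# builds a new list instead of sorting in place.
import bisect

def binary_insertion_sort_bisect(collection: list) -> list:
    result: list = []
    for item in collection:
        bisect.insort(result, item)  # O(log n) search + O(n) shift, as above
    return result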
| 703 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class UpperCamelCase__ (PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
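# Minimal round-trip sketch for the tokenizer class above (published upstream
# as CamembertTokenizer). Added for illustration: it assumes the camembert-base
# checkpoint can be downloaded, and the sample sentence is arbitrary.
tokenizer = UpperCamelCase__.from_pretrained("camembert-base")
ids = tokenizer("J'aime le camembert !")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # subword pieces, words marked with ▁
print(tokenizer.decode(ids, skip_special_tokens=True))  # back to plain text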
| 9 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Trim empty lines at both ends of the block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
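# A self-contained demo of _find_text_in_file on a throwaway file; the marker
# strings are shortened stand-ins for the HTML comments used above. Added for
# illustration only.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as tmp:
    tmp.write("intro\n<!--start-->\n[BERT](../model_doc/bert)\n<!--end-->\nrest\n")
    demo_path = tmp.name
block, start, end, all_lines = _find_text_in_file(demo_path, "<!--start-->", "<!--end-->")
print(block)  # "[BERT](../model_doc/bert)\n" together with its line span (start, end)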
| 704 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch (out, in, kH, kW) -> Flax (kH, kW, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: PyTorch (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
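# A quick illustration of the renaming rules above on one linear weight:
# PyTorch stores nn.Linear weights as (out_features, in_features) while Flax
# expects a "kernel" of shape (in_features, out_features), hence the transpose.
# The state-dict entries here are toy values for demonstration only.
import numpy as np

toy_flax_state_dict = {("dense", "kernel"): np.zeros((3, 4))}
new_key, new_tensor = rename_key_and_reshape_tensor(
    ("dense", "weight"), np.ones((4, 3)), toy_flax_state_dict
)
print(new_key, new_tensor.shape)  # ('dense', 'kernel') (3, 4)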
| 9 | 0 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue = []
    # for each node and its adjacency list, add the node and its rank to the queue
    # heapq implements a min priority queue, so -1 * len(value) turns it into a max queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the node has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
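# Sanity check (added for illustration): every edge must have at least one
# endpoint in the returned cover. Note the algorithm mutates its input
# adjacency lists, so we verify against a fresh copy of the graph.
import copy

demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = greedy_min_vertex_cover(copy.deepcopy(demo_graph))
assert all(u in cover or v in cover for u, nbrs in demo_graph.items() for v in nbrs)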
| 705 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
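# Outside the test harness the benchmark utilities are driven the same way; a
# hedged sketch (the model name and sweep sizes are illustrative, and a working
# TensorFlow install is required):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8, 32],
    batch_sizes=[1, 2],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)  # nested dict: model -> batch size -> sequence length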
| 9 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ (TokenizerTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def UpperCamelCase_ ( self ):
super().setUp()
lowerCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
lowerCamelCase__ = {}
lowerCamelCase__ = {}
for i, value in enumerate(__a ):
lowerCamelCase__ = i
lowerCamelCase__ = i
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_shape_file"""] )
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file ,"""w""" ,encoding="""utf-8""" ) as word_shape_writer:
json.dump(__a ,__a ,ensure_ascii=__a )
with open(self.word_pronunciation_file ,"""w""" ,encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(__a ,__a ,ensure_ascii=__a )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
lowerCamelCase__ = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(__a ,["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) ,[5, 6, 2, 5, 7, 8] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a ,strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a ,strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a ,strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a ,strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=__a ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase__ = {}
for i, token in enumerate(__a ):
lowerCamelCase__ = i
lowerCamelCase__ = RoCBertWordpieceTokenizer(vocab=__a ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase_ ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase_ ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase_ ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
lowerCamelCase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__a ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
def UpperCamelCase_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(__a ,**__a )
lowerCamelCase__ = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCamelCase__ = tokenizer_r.encode_plus(
__a ,return_attention_mask=__a ,return_token_type_ids=__a ,return_offsets_mapping=__a ,add_special_tokens=__a ,)
lowerCamelCase__ = tokenizer_r.do_lower_case if hasattr(__a ,"""do_lower_case""" ) else False
lowerCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ["的", "人", "有"]
lowerCamelCase__ = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ = True
lowerCamelCase__ = self.tokenizer_class.from_pretrained(__a ,**__a )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(__a ,**__a )
lowerCamelCase__ = tokenizer_p.encode(__a ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer_r.encode(__a ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(__a )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a ,__a )
self.assertListEqual(__a ,__a )
lowerCamelCase__ = False
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(__a ,**__a )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(__a ,**__a )
lowerCamelCase__ = tokenizer_r.encode(__a ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer_p.encode(__a ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(__a )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase__ = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a ,__a )
self.assertListEqual(__a ,__a )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
lowerCamelCase__ = tokenizer.encode("""你好""" ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer.encode("""你是谁""" ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__a ,__a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ = "你好,你是谁"
lowerCamelCase__ = tokenizer.tokenize(__a )
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(__a )
lowerCamelCase__ = tokenizer.convert_tokens_to_shape_ids(__a )
lowerCamelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(__a )
lowerCamelCase__ = tokenizer.prepare_for_model(
__a ,__a ,__a ,add_special_tokens=__a )
lowerCamelCase__ = tokenizer.encode_plus(__a ,add_special_tokens=__a )
self.assertEqual(__a ,__a )
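# RoCBertWordpieceTokenizer uses greedy longest-match-first subword splitting,
# exactly like BERT's WordPiece; a self-contained demo with a toy vocabulary
# (the same scheme as the test above), added for illustration:
demo_vocab = {token: idx for idx, token in enumerate(
    ["[UNK]", "want", "##want", "##ed", "un", "runn", "##ing"]
)}
demo_wordpiece = RoCBertWordpieceTokenizer(vocab=demo_vocab, unk_token="[UNK]")
print(demo_wordpiece.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']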
| 706 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
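# The best-known loop for this problem is 169 -> 363601 -> 1454 -> 169; a
# quick check with the helper above (added for illustration):
n = 169
for _ in range(3):
    n = digit_factorial_sum(n)
print(n == 169)  # True: the chain returns to its start after three steps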
| 9 | 0 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        # build one candidate per digit position, then drop that digit
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('doctest').testmod()
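# Worked example (added for illustration): removing one digit from 123 yields
# 23, 13 and 12, so the maximum is 23; the sign is dropped via abs() first.
print(remove_digit(123))   # 23
print(remove_digit(-403))  # 43, from the candidates 03, 43 and 40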
| 707 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
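# What a generated dummy looks like (added for illustration): for a class name
# guarded by the torch backend, create_dummy_object fills the DUMMY_CLASS
# template, yielding a placeholder that raises on any use.
print(create_dummy_object("UNet2DModel", '["torch"]'))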
| 9 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class UpperCamelCase__ (DiffusionPipeline ):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # keep only the last `strength` fraction of the scheduler's timesteps
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # noise the clean image to the starting timestep
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents
    @torch.no_grad()
    def __call__(self, image=None, strength=0.8, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True):
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
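# Worked example of the strength-to-timestep arithmetic in get_timesteps above:
# with num_inference_steps = 50 and strength = 0.6, the input image is noised
# to the 30th-from-last scheduler step and denoised from there.
num_inference_steps, strength = 50, 0.6
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
t_start = max(num_inference_steps - init_timestep, 0)  # 20, so 30 steps remain
print(init_timestep, t_start)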
| 708 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (TokenizerTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        lowerCamelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """This is a test"""
lowerCamelCase__ = """This is a test"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """<s>"""
lowerCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,20_00 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )
def UpperCamelCase_ ( self ):
        lowerCamelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCamelCase_ ( self ):
        lowerCamelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowerCamelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
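# The fixture model above uses SentencePiece byte fallback: characters missing
# from the vocabulary are emitted as <0xNN> byte pieces, which is why "é"
# tokenizes to "<0xC3>", "<0xA9>" (its two UTF-8 bytes). Quick check, added for
# illustration:
print([hex(b) for b in "é".encode("utf-8")])  # ['0xc3', '0xa9']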
| 9 | 0 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : str = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 709 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (Scene ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
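# Added driver sketch (hedged; not part of the original animation file).
# Assuming Manim Community Edition, the scene can also be rendered without the
# `manim` CLI; `tempconfig` and `Scene.render` are standard CE entry points.
if __name__ == "__main__":
    from manim import tempconfig
    with tempconfig({"""quality""": """low_quality""", """preview""": False} ):
        UpperCamelCase__().render()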
| 9 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset ):
'''simple docstring'''
    def __init__( self ,tokenizer ,dataset ,seq_length=10_24 ,num_of_sequences=10_24 ,chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["""content"""] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer ,truncation=False )["""input_ids"""]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 ,len(all_token_ids ) ,self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
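def _toy_constant_length_demo():
    # Added illustration (hedged; not part of the original script): shows the
    # fixed-length chunking contract on a tiny in-memory dataset. "gpt2" is an
    # arbitrary example checkpoint, not the model this script evaluates.
    toy_tokenizer = AutoTokenizer.from_pretrained("""gpt2""" )
    toy_dataset = [{"""content""": """def add(a, b):\n    return a + b\n"""}] * 8
    chunks = ConstantLengthDataset(toy_tokenizer ,toy_dataset ,seq_length=32 )
    first = next(iter(chunks ) )
    assert first.shape == (32,)
    return first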
def create_dataloader( args ):
    ds_kwargs = {"""streaming""": True}
    train_data = load_dataset(args.dataset_name ,split="""train""" ,**ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer ,train_data ,seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset ,batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch ,labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("""inf""" )
    return loss.item(), perplexity.item()
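def _perplexity_sanity_check():
    # Added illustration (not part of the original script): perplexity is just
    # exp(mean cross-entropy), mirroring the computation in the function above.
    fake_losses = torch.tensor([2.0, 2.0, 2.0] )
    return torch.exp(fake_losses.mean() )  # e**2 ~= 7.389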
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
| 710 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number( number: int ) -> int:
    sum_of_digits_squared = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
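# Worked example (added for clarity): starting from 44 the digit-square chain
# runs 44 -> 32 -> 13 -> 10 -> 1, ending at the fixed point 1.
assert next_number(44 ) == 32 and next_number(32 ) == 13 and next_number(13 ) == 10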
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False
def chain( number: int ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number: int = 1000_0000 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Any = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin ,PretrainedConfig ):
    model_type = """bit"""
    layer_types = ["""preactivation""", """bottleneck"""]
    supported_padding = ["""SAME""", """VALID"""]
    def __init__( self ,num_channels=3 ,embedding_size=64 ,hidden_sizes=[2_56, 5_12, 10_24, 20_48] ,depths=[3, 4, 6, 3] ,layer_type="""preactivation""" ,hidden_act="""relu""" ,global_padding=None ,num_groups=32 ,drop_path_rate=0.0 ,embedding_dynamic_padding=False ,output_stride=32 ,width_factor=1 ,out_features=None ,out_indices=None ,**kwargs ,):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F'''Padding strategy {global_padding} not supported''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 ,len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
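# Added usage sketch (hedged; not in the original module): with the backbone
# mixin wired up, out_features defaults to the deepest stage when neither
# out_features nor out_indices is passed.
_demo_config = BitConfig()
assert _demo_config.stage_names == ["""stem""", """stage1""", """stage2""", """stage3""", """stage4"""]
assert _demo_config.out_features == ["""stage4"""]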
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'donut-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self ,image_size=2_24 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="""gelu""" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1E-5 ,**kwargs ,):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
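# Quick arithmetic check (added): with the defaults above (embed_dim=96 and
# four stages) the exported hidden_size is 96 * 2**3 = 768.
assert int(96 * 2 ** (4 - 1) ) == 7_68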
| 9 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
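# Added illustration (not part of the original script): the slicing above
# splits one fused key/value projection row-wise. Toy version, hidden size 4:
def _demo_kv_split():
    hidden = 4
    kv_weight = torch.arange(2 * hidden * hidden ).reshape(2 * hidden , hidden )
    key_weight , value_weight = kv_weight[:hidden, :] , kv_weight[hidden:, :]
    assert key_weight.shape == (hidden, hidden) and value_weight.shape == (hidden, hidden)
    return key_weight , value_weight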
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 712 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint( checkpoint_path: str , pytorch_dump_folder_path: str ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
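# Note (added for context): the original DialoGPT checkpoints store the LM
# head under "lm_head.decoder.weight", while Hugging Face GPT-2 ties the head
# to the input embeddings and expects "lm_head.weight"; the conversion is a
# single key rename.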
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 9 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : int = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'owlvit_text_model'
    def __init__( self ,vocab_size=4_94_08 ,hidden_size=5_12 ,intermediate_size=20_48 ,num_hidden_layers=12 ,num_attention_heads=8 ,max_position_embeddings=16 ,hidden_act="""quick_gelu""" ,layer_norm_eps=1E-5 ,attention_dropout=0.0 ,initializer_range=0.02 ,initializer_factor=1.0 ,pad_token_id=0 ,bos_token_id=4_94_06 ,eos_token_id=4_94_07 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class OwlViTVisionConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'owlvit_vision_model'
    def __init__( self ,hidden_size=7_68 ,intermediate_size=30_72 ,num_hidden_layers=12 ,num_attention_heads=12 ,num_channels=3 ,image_size=7_68 ,patch_size=32 ,hidden_act="""quick_gelu""" ,layer_norm_eps=1E-5 ,attention_dropout=0.0 ,initializer_range=0.02 ,initializer_factor=1.0 ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class OwlViTConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'owlvit'
    is_composition = True
    def __init__( self ,text_config=None ,vision_config=None ,projection_dim=5_12 ,logit_scale_init_value=2.6592 ,return_dict=True ,**kwargs ,):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
    @classmethod
    def from_text_vision_configs( cls ,text_config ,vision_config ,**kwargs ):
        config_dict = {}
        config_dict["""text_config"""] = text_config
        config_dict["""vision_config"""] = vision_config
        return cls.from_dict(config_dict ,**kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
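# Added usage sketch (hedged; not in the original module): composing the
# top-level config from plain sub-config dicts via the classmethod above.
def _demo_compose_owlvit_config():
    return OwlViTConfig.from_text_vision_configs(
        {"""hidden_size""": 5_12} ,{"""hidden_size""": 7_68} )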
class OwlViTOnnxConfig(OnnxConfig ):
    '''simple docstring'''
@property
    def inputs( self ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
    def outputs( self ):
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
    def atol_for_validation( self ):
return 1E-4
    def generate_dummy_inputs( self ,processor ,batch_size = -1 ,seq_length = -1 ,framework = None ,):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer ,batch_size=batch_size ,seq_length=seq_length ,framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor ,batch_size=batch_size ,framework=framework )
        return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self ):
return 14
| 713 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = mask_ratio
lowerCamelCase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
# expected sequence length = num_patches
lowerCamelCase__ = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
lowerCamelCase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
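# Quick arithmetic check (added): with the tester defaults above (image 30,
# patch 2, mask_ratio 0.6) there are (30 // 2) ** 2 = 225 patches and the
# expected unmasked sequence length is ceil(0.4 * (225 + 1)) = 91.
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1) ) ) == 91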
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = outputs_dict[0].numpy()
lowerCamelCase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCAmelCase ):
lowerCamelCase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCAmelCase ):
lowerCamelCase__ = v.numpy()
else:
lowerCamelCase__ = np.array(_lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.constant(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ = tf_noise
super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCAmelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase )
}
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ = main_layer_class(_lowerCAmelCase )
lowerCamelCase__ = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) )
lowerCamelCase__ = model(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" )
model.save(_lowerCAmelCase )
lowerCamelCase__ = tf.keras.models.load_model(
_lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCAmelCase ,tf.keras.Model )
lowerCamelCase__ = model(_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = outputs.last_hidden_state.numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = outputs.logits.numpy()
lowerCamelCase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase )
lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = after_outputs["""logits"""].numpy()
lowerCamelCase__ = 0
lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase ,1E-5 )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_lowerCAmelCase )
lowerCamelCase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ = model_class.from_config(model.config )
lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCamelCase_ ( self ):
pass
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ = ViTMAEConfig()
lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
# verify the logits
lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCamelCase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
| 9 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCamelCase : Dict = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor ):
'''simple docstring'''
    def __init__( self ,*args ,**kwargs ):
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=4_00 ,do_resize=True ,size=None ,do_center_crop=True ,crop_size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,):
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin ,unittest.TestCase ):
'''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,Image.Image )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,np.ndarray )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 9 | 0 |
def method_a( boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points( a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(F'''y = {y}''' )
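# Worked example (added): for f(x) = x**2 on [0, 1] with 10 steps the rule
# lands near the exact integral 1/3. Note make_points stops at b - h, so
# whether the last interior point is emitted depends on float rounding; the
# loose tolerance below holds either way.
assert abs(method_a([0.0, 1.0] , 10.0 ) - 1 / 3 ) < 0.1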
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error( example_no , data_set="""train""" ):
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value( data_input_tuple ):
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
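# Worked example (added): with the initial parameter_vector = [2, 4, 1, 5]
# the hypothesis is h(x) = 2 + 4*x1 + 1*x2 + 5*x3, so for the first training
# input (5, 2, 3) it evaluates to 2 + 20 + 2 + 15 = 39.
assert _hypothesis_value((5, 2, 3) ) == 39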
def output( example_no , data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no , data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index , end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_0002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 9 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase : List[str] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCamelCase : Optional[Any] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
UpperCamelCase : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\",\n        ...                     \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...             [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\",\n        ...                     \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...             [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""http://www.cs.umd.edu/~snover/tercom/""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" ,id="""sequence""" ) ,id="""references""" ),
} ) ,codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] ,reference_urls=[
"""https://github.com/jhclark/tercom""",
] ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,):
lowerCamelCase__ = len(references[0] )
if any(len(__a ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowerCamelCase__ = [[refs[i] for refs in references] for i in range(__a )]
lowerCamelCase__ = TER(
normalized=__a ,no_punct=__a ,asian_support=__a ,case_sensitive=__a ,)
lowerCamelCase__ = sb_ter.corpus_score(__a ,__a )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 716 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ):
lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase )
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
lowerCamelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCamelCase__ = {}
lowerCamelCase__ = """first_stage_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCamelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCamelCase__ = {}
lowerCamelCase__ = """model.diffusion_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCamelCase__ = state_dict[key]
lowerCamelCase__ = config.model.params.first_stage_config.params
lowerCamelCase__ = config.model.params.unet_config.params
lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval()
vqvae.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval()
unet.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , )
lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipeline.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
UpperCamelCase : List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
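# Example invocation (script name and paths are hypothetical, for illustration only):
#   python convert_ldm_uncond.py --checkpoint_path ./ldm.ckpt \
#       --config_path ./ldm_config.yaml --output_path ./ldm-pipeline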
| 9 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ (tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ):
super().__init__()
lowerCamelCase__ = pad_token_id
lowerCamelCase__ = max_length
lowerCamelCase__ = vocab
lowerCamelCase__ = merges
lowerCamelCase__ = BytePairTokenizer(__A ,__A ,sequence_length=__A )
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,*_lowerCAmelCase ,**_lowerCAmelCase ):
lowerCamelCase__ = [""" """.join(__A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase__ = tokenizer.get_vocab()
return cls(__A ,__A ,*__A ,**__A )
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,*_lowerCAmelCase ,**_lowerCAmelCase ):
lowerCamelCase__ = GPTaTokenizer.from_pretrained(__A ,*__A ,**__A )
return cls.from_tokenizer(__A ,*__A ,**__A )
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ):
return cls(**__A )
def UpperCamelCase_ ( self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self.tf_tokenizer(__A )
lowerCamelCase__ = tf.ones_like(__A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase__ = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase__ , lowerCamelCase__ = pad_model_inputs(
__A ,max_seq_length=__A ,pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 717 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ):
lowerCamelCase__ = ""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ):
lowerCamelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__lowerCAmelCase )
return decoded
def A__ ( __lowerCAmelCase : list[int] ):
lowerCamelCase__ = []
for key in product(__lowerCAmelCase , repeat=3 ):
lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase )
if encoded is not None:
possibles.append(__lowerCAmelCase )
return possibles
def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ):
return [possible for possible in possibles if common_word in possible.lower()]
def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" )
lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )]
lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase )
for common_word in COMMON_WORDS:
lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase )
if len(__lowerCAmelCase ) == 1:
break
lowerCamelCase__ = possibles[0]
return sum(ord(__lowerCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
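# Sanity check of the XOR scheme this solution inverts (illustrative values):
#   key = (ord("a"), ord("b"), ord("c"))
#   ciphertext = [ord(c) ^ k for c, k in zip("the cat", cycle(key))]
#   try_key(ciphertext, key)  # -> "the cat": XOR with the same key is its own inverse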
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Any = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : int = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[Any] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
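# With the _LazyModule pattern above, importing this package stays cheap at runtime:
# the heavy torch/tf/flax submodules are only imported when one of the names listed in
# _import_structure is first accessed, while the TYPE_CHECKING branch gives static
# tooling the real imports.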
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ):
lowerCamelCase__ = data
# Initialize hash values
lowerCamelCase__ = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
lowerCamelCase__ = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
lowerCamelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ):
lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64))
lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase_ ( self ):
# Convert into blocks of 64 bytes
lowerCamelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) )
# add 48 0-ed integers
words += [0] * 48
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowerCamelCase__ = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase__ = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 )
lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
lowerCamelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 )
lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
lowerCamelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase__ = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
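# Sanity check mirroring the unit test below (SHAaaa is the masked class name used there):
#   SHAaaa(b"Test String").hash == hashlib.shaaaa(b"Test String").hexdigest()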
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
import hashlib
lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" )
self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() )
def A__ ( ):
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" )
print(SHAaaa(__lowerCAmelCase ).hash )
if __name__ == "__main__":
main()
| 9 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def A__ ( __lowerCAmelCase : List[str] ):
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def A__ ( __lowerCAmelCase : Optional[int] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def A__ ( __lowerCAmelCase : List[str] ):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCamelCase__ = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCamelCase__ = 0
# Doctest custom flag to ignore output.
UpperCamelCase : Tuple = doctest.register_optionflag('IGNORE_RESULT')
UpperCamelCase : Dict = doctest.OutputChecker
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,A__ ,A__ ,A__ )
UpperCamelCase : str = CustomOutputChecker
UpperCamelCase : Dict = HfDoctestModule
UpperCamelCase : Optional[int] = HfDocTestParser
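# Illustrative use of the custom flag inside a docstring (the function name is made up):
#   >>> print_device_report()   # doctest: +IGNORE_RESULT
#   {'gpus': 4}
# With IGNORE_RESULT set, the checker above accepts any output for that example.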
| 719 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
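# The helper below implements weight tying: it wraps the token-embedding matrix in a
# bias-free nn.Linear whose weight shares storage with the embedding, so the output
# projection and the input embeddings stay in sync without copying parameters.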
def A__ ( __lowerCAmelCase : Tuple ):
lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
lowerCamelCase__ = emb.weight.data
return lin_layer
def A__ ( __lowerCAmelCase : Dict ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
lowerCamelCase__ = mam_aaa["""model"""]
remove_ignore_keys_(__lowerCAmelCase )
lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowerCamelCase__ = MaMaaaConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""]
lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowerCamelCase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase : Tuple = parser.parse_args()
    UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
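# Example invocation (paths are hypothetical):
#   python convert_m2m100_checkpoint.py ./m2m100_418M.pt ./m2m100-hf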
| 9 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCamelCase : Optional[int] = namedtuple('covid_data', 'cases deaths recovered')
def A__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ):
lowerCamelCase__ = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(lowerCAmelCase__ ).content ).xpath(lowerCAmelCase__ ) )
UpperCamelCase : str = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 720 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = BlipImageProcessor()
lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor
def UpperCamelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )
lowerCamelCase__ = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = processor(text=_lowerCAmelCase )
lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
| 9 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ):
for e in env_keys:
lowerCamelCase__ = int(os.environ.get(_lowerCamelCase , -1 ) )
if val >= 0:
return val
return default
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=False ):
lowerCamelCase__ = os.environ.get(_lowerCamelCase , str(_lowerCamelCase ) )
return strtobool(_lowerCamelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple="no" ):
lowerCamelCase__ = os.environ.get(_lowerCamelCase , str(_lowerCamelCase ) )
return value
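# Usage sketch, assuming the original (unmasked) accelerate helper names
# parse_flag_from_env / parse_choice_from_env; the env values are illustrative:
#   os.environ["ACCELERATE_DEBUG_MODE"] = "yes"
#   parse_flag_from_env("ACCELERATE_DEBUG_MODE")          # -> True (via strtobool)
#   parse_choice_from_env("ACCELERATE_MIXED_PRECISION")   # -> "no" (the default) if unset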
| 721 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def A__ ( __lowerCAmelCase : Union[str, Any] ):
if hor == 128:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 64, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase__ = model
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 9 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" ,safety_checker=_lowerCAmelCase ,cache_dir=_lowerCAmelCase )
lowerCamelCase__ = [t[-1] for t in os.walk(os.path.join(_lowerCAmelCase ,os.listdir(_lowerCAmelCase )[0] ,"""snapshots""" ) )]
lowerCamelCase__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" ,safety_checker=_lowerCAmelCase )
lowerCamelCase__ = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = 4
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = num_samples * [prompt]
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(_lowerCAmelCase ,dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
lowerCamelCase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowerCAmelCase ) == num_samples
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""flax""" ,safety_checker=_lowerCAmelCase )
lowerCamelCase__ = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = 50
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = num_samples * [prompt]
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase ,dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,safety_checker=_lowerCAmelCase )
lowerCamelCase__ = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = 50
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = num_samples * [prompt]
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase ,dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa )
lowerCamelCase__ = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = 50
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = num_samples * [prompt]
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase ,dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase_ ( self ):
lowerCamelCase__ = FlaxDDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,set_alpha_to_one=_lowerCAmelCase ,steps_offset=1 ,)
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,scheduler=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,)
lowerCamelCase__ = scheduler.create_state()
lowerCamelCase__ = scheduler_state
lowerCamelCase__ = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = 50
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = num_samples * [prompt]
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase ,dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase_ ( self ):
lowerCamelCase__ = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = num_samples * [prompt]
lowerCamelCase__ = jax.random.split(jax.random.PRNGKey(0 ) ,_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,safety_checker=_lowerCAmelCase ,)
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
lowerCamelCase__ = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,safety_checker=_lowerCAmelCase ,use_memory_efficient_attention=_lowerCAmelCase ,)
lowerCamelCase__ = replicate(_lowerCAmelCase )
lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase )
lowerCamelCase__ = shard(_lowerCAmelCase )
lowerCamelCase__ = pipeline(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
lowerCamelCase__ = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
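# Note on the data-parallel pattern used throughout these tests: `replicate` copies the
# params pytree onto every local device, `shard` reshapes a host batch of size
# num_devices * per_device into (num_devices, per_device, ...), and `jit=True` makes the
# pipeline run its sampling loop under jax.pmap across those devices.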
| 700 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,):
lowerCamelCase__ = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
lowerCamelCase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCamelCase__ = token_dict["""token"""]
lowerCamelCase__ = Tokenizer(Unigram() )
lowerCamelCase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ),
normalizers.Lowercase(),
] )
lowerCamelCase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,)
lowerCamelCase__ = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [files]
self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase )
self.add_unk_id()
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase )
self.add_unk_id()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = json.loads(self._tokenizer.to_str() )
lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""]
lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
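# Minimal training sketch, assuming the original (unmasked) class and method names
# SentencePieceUnigramTokenizer.train / .train_from_iterator; the corpus path is illustrative:
#   tok = SentencePieceUnigramTokenizer()
#   tok.train(["corpus.txt"], vocab_size=8000)
#   tok.save("unigram.json")  # save() is inherited from tokenizers' BaseTokenizer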
| 9 | 0 |
'''simple docstring'''
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def A__ ( __lowerCAmelCase : Tuple=None ):
if subparsers is not None:
lowerCamelCase__ = subparsers.add_parser("""test""" )
else:
lowerCamelCase__ = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=SCREAMING_SNAKE_CASE_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
return parser
def A__ ( __lowerCAmelCase : Any ):
lowerCamelCase__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
lowerCamelCase__ = script_name
else:
lowerCamelCase__ = F'''--config_file={args.config_file} {script_name}'''
lowerCamelCase__ = ["accelerate-launch"] + test_args.split()
lowerCamelCase__ = execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def A__ ( ):
lowerCamelCase__ = test_command_parser()
lowerCamelCase__ = parser.parse_args()
test_command(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 701 |
'''simple docstring'''
from __future__ import annotations
import math
def A__ ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
lowerCamelCase__ = []
for num in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = 0
while 2 * i * i <= odd_composites[num]:
lowerCamelCase__ = odd_composites[num] - 2 * i * i
if is_prime(__lowerCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__lowerCAmelCase ) == n:
return list_nums
return []
def A__ ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
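# Worked check of Goldbach's other conjecture for small odd composites (illustrative):
#   9 = 7 + 2*1**2,  15 = 7 + 2*2**2,  21 = 3 + 2*3**2
# solution() returns the smallest odd composite for which no such decomposition exists.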
| 9 | 0 |
'''simple docstring'''
from math import factorial
def A__ ( __lowerCAmelCase : Any = 20 ):
lowerCamelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCamelCase__ = n // 2
return int(factorial(SCREAMING_SNAKE_CASE_ ) / (factorial(SCREAMING_SNAKE_CASE_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCamelCase : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
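# Worked example: for n = 4 the middle entry of row 2n = 8 of Pascal's triangle is
# C(8, 4) = 8! / (4! * 4!) = 40320 / (24 * 24) = 70.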
| 702 |
'''simple docstring'''
def A__ ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(__lowerCAmelCase , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
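# The result can be verified by hand: a = 200, b = 375, c = 425 satisfies
# 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2,
# giving the product 200 * 375 * 425 = 31875000.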
| 9 | 0 |
'''simple docstring'''
UpperCamelCase : List[str] = range(2, 20 + 1)
UpperCamelCase : Union[str, Any] = [10**k for k in range(ks[-1] + 1)]
UpperCamelCase : Dict = {}
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str ):
lowerCamelCase__ = sum(a_i[j] for j in range(__lowercase , len(__lowercase ) ) )
lowerCamelCase__ = sum(a_i[j] * base[j] for j in range(min(len(__lowercase ) , __lowercase ) ) )
lowerCamelCase__ = 0, 0
lowerCamelCase__ = n - i
lowerCamelCase__ = memo.get(__lowercase )
if sub_memo is not None:
lowerCamelCase__ = sub_memo.get(__lowercase )
if jumps is not None and len(__lowercase ) > 0:
# find and make the largest jump without going over
lowerCamelCase__ = -1
for _k in range(len(__lowercase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCamelCase__ = _k
break
if max_jump >= 0:
lowerCamelCase__ = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCamelCase__ = diff + c
for j in range(min(__lowercase , len(__lowercase ) ) ):
lowerCamelCase__ = divmod(__lowercase , 10 )
if new_c > 0:
add(__lowercase , __lowercase , __lowercase )
else:
lowerCamelCase__ = []
else:
lowerCamelCase__ = {c: []}
lowerCamelCase__ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCamelCase__ = next_term(__lowercase , k - 1 , i + dn , __lowercase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCamelCase__ = compute(__lowercase , __lowercase , i + dn , __lowercase )
diff += _diff
dn += terms_jumped
lowerCamelCase__ = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCamelCase__ = 0
while j < len(__lowercase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__lowercase , (diff, dn, k) )
return (diff, dn)
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
if i >= n:
return 0, i
if k > len(__lowercase ):
a_i.extend([0 for _ in range(k - len(__lowercase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCamelCase__ = i
lowerCamelCase__ = 0, 0, 0
for j in range(len(__lowercase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCamelCase__ = ds_c + ds_b
diff += addend
lowerCamelCase__ = 0
for j in range(__lowercase ):
lowerCamelCase__ = a_i[j] + addend
lowerCamelCase__ = divmod(__lowercase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__lowercase , __lowercase , __lowercase )
return diff, i - start_i
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any ):
for j in range(__lowercase , len(__lowercase ) ):
lowerCamelCase__ = digits[j] + addend
if s >= 10:
lowerCamelCase__ = divmod(__lowercase , 10 )
lowerCamelCase__ = addend // 10 + quotient
else:
lowerCamelCase__ = s
lowerCamelCase__ = addend // 10
if addend == 0:
break
while addend > 0:
lowerCamelCase__ = divmod(__lowercase , 10 )
digits.append(__lowercase )
def A__ ( __lowerCAmelCase : int = 10**15 ):
lowerCamelCase__ = [1]
lowerCamelCase__ = 1
lowerCamelCase__ = 0
while True:
lowerCamelCase__ = next_term(__lowercase , 20 , i + dn , __lowercase )
dn += terms_jumped
if dn == n - i:
break
lowerCamelCase__ = 0
for j in range(len(__lowercase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }')
| 703 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCamelCase : Dict = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
UpperCamelCase : List[Any] = {
'camembert-base': 5_12,
}
UpperCamelCase : List[str] = '▁'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
lowerCamelCase__ = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = []
lowerCamelCase__ = """"""
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self ,_lowerCAmelCase ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase ,"""wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
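# Minimal usage sketch (checkpoint name comes from the pretrained map above and the
# sentencepiece model must be downloadable; UpperCamelCase__ is the masked tokenizer class):
#   tok = UpperCamelCase__.from_pretrained("camembert-base")
#   ids = tok("J'aime le camembert !")["input_ids"]   # wrapped in <s> ... </s> specials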
| 9 | 0 |
'''simple docstring'''
import json
import sys
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
with open(__snake_case , encoding="""utf-8""" ) as f:
lowerCamelCase__ = json.load(__snake_case )
lowerCamelCase__ = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(__snake_case ):
lowerCamelCase__ = results[benchmark_name]
lowerCamelCase__ = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase__ = """| metric |"""
lowerCamelCase__ = """|--------|"""
lowerCamelCase__ = """| new / old (diff) |"""
for metric_name in sorted(__snake_case ):
lowerCamelCase__ = benchmark_res[metric_name]
lowerCamelCase__ = metric_vals["""new"""]
lowerCamelCase__ = metric_vals.get("""old""" , __snake_case )
lowerCamelCase__ = metric_vals.get("""diff""" , __snake_case )
lowerCamelCase__ = F''' {new_val:f}''' if isinstance(__snake_case , (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(__snake_case , (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(__snake_case , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(__snake_case ) )
if __name__ == "__main__":
UpperCamelCase : List[str] = sys.argv[1]
UpperCamelCase : Any = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
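# Illustrative input shape (added), inferred from the parsing logic above; the
# benchmark and metric names are placeholders:
# {
#     "benchmarks/text_classification.json": {
#         "eval_accuracy": {"new": 0.91, "old": 0.89, "diff": 0.02}
#     }
# }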
| 704 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = R"""\w+[.]\d+"""
lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase )
for pat in pats:
lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) )
return key
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
lowerCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )
lowerCamelCase__ = flatten_dict(__lowerCAmelCase )
lowerCamelCase__ = {}
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weights so that a warning is thrown
lowerCamelCase__ = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
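# Illustrative check (added; self-contained): the conv-layer branch above moves
# PyTorch kernels from (out, in, kH, kW) into Flax's (kH, kW, in, out) layout.
import numpy as np

_pt_kernel = np.zeros((8, 3, 4, 4))              # PyTorch conv weight layout
_flax_kernel = _pt_kernel.transpose(2, 3, 1, 0)  # same transpose as above
assert _flax_kernel.shape == (4, 4, 3, 8)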
| 9 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase : Tuple = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int=None ):
# Initialise PyTorch model
lowerCamelCase__ = XLNetConfig.from_json_file(lowerCAmelCase__ )
lowerCamelCase__ = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
lowerCamelCase__ = finetuning_task
lowerCamelCase__ = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowerCamelCase__ = XLNetForSequenceClassification(lowerCAmelCase__ )
elif "squad" in finetuning_task:
lowerCamelCase__ = finetuning_task
lowerCamelCase__ = XLNetForQuestionAnswering(lowerCAmelCase__ )
else:
lowerCamelCase__ = XLNetLMHeadModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    # Save the PyTorch model
lowerCamelCase__ = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCamelCase__ = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(lowerCAmelCase__ )}''' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F'''Save configuration file to {os.path.abspath(lowerCAmelCase__ )}''' )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
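# Illustrative usage (added; the checkpoint paths are placeholders):
#   python convert_xlnet_checkpoint.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pt \
#     --finetuning_task sts-b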
| 705 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
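# Illustrative usage sketch (added; mirrors the arguments exercised in the
# tests above, kept commented out because it needs TensorFlow and a download):
# args = TensorFlowBenchmarkArguments(
#     models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#     sequence_lengths=[8], batch_sizes=[1], multi_process=False,
# )
# results = TensorFlowBenchmark(args).run()
# print(results.time_inference_result)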
| 9 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ (__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase = '''ClapFeatureExtractor'''
_UpperCamelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ):
super().__init__(a_ ,a_ )
def __call__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
lowerCamelCase__ = kwargs.pop("""sampling_rate""" ,a_ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
lowerCamelCase__ = self.tokenizer(a_ ,return_tensors=a_ ,**a_ )
if audios is not None:
lowerCamelCase__ = self.feature_extractor(
a_ ,sampling_rate=a_ ,return_tensors=a_ ,**a_ )
if text is not None and audios is not None:
lowerCamelCase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) ,tensor_type=a_ )
def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
return self.tokenizer.batch_decode(*a_ ,**a_ )
def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
return self.tokenizer.decode(*a_ ,**a_ )
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
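# Illustrative usage sketch (added; the checkpoint id and sampling rate are
# assumptions for illustration):
# from transformers import ClapProcessor
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=waveform,
#                    sampling_rate=48_000, return_tensors="pt")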
| 706 |
'''simple docstring'''
from math import factorial
UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Convert the number to a string to iterate over its digits and sum their factorials.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
lowerCamelCase__ = 0
# the cached sizes of the previous chains
lowerCamelCase__ = {}
for start_chain_element in range(1 , __lowerCAmelCase ):
# The temporary set will contain the elements of the chain
lowerCamelCase__ = set()
lowerCamelCase__ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
lowerCamelCase__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__lowerCAmelCase )
chain_set_length += 1
lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase__ = chain_set_length
        # If the chain contains the exact number of elements, increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
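# Illustrative check (added; self-contained): the classic Project Euler 74 loop
# 169 -> 363601 -> 1454 -> 169, verified with a minimal factorial digit sum.
from math import factorial as _factorial

def _digit_factorial_sum(n: int) -> int:
    return sum(_factorial(int(d)) for d in str(n))

assert _digit_factorial_sum(169) == 363601
assert _digit_factorial_sum(363601) == 1454
assert _digit_factorial_sum(1454) == 169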
| 9 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = ''
_UpperCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCamelCase = None # compression type in fsspec. ex: "gzip"
_UpperCamelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self ,_lowerCAmelCase = "" ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ):
super().__init__(self ,**_lowerCAmelCase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase__ = fsspec.open(
_lowerCAmelCase ,mode="""rb""" ,protocol=_lowerCAmelCase ,compression=self.compression ,client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" ,{} ), # To avoid issues if it was already passed.
} ,**(target_options or {}) ,)
lowerCamelCase__ = os.path.basename(self.file.path.split("""::""" )[0] )
lowerCamelCase__ = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
lowerCamelCase__ = None
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowerCAmelCase ).lstrip("""/""" )
def UpperCamelCase_ ( self ):
if self.dir_cache is None:
lowerCamelCase__ = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
lowerCamelCase__ = {f["""name"""]: f}
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.file.open().read()
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = "rb" ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
lowerCamelCase__ = self._strip_protocol(_lowerCAmelCase )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'bz2'
_UpperCamelCase = 'bz2'
_UpperCamelCase = '.bz2'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'gzip'
_UpperCamelCase = 'gzip'
_UpperCamelCase = '.gz'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'lz4'
_UpperCamelCase = 'lz4'
_UpperCamelCase = '.lz4'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'xz'
_UpperCamelCase = 'xz'
_UpperCamelCase = '.xz'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'zstd'
_UpperCamelCase = 'zstd'
_UpperCamelCase = '.zst'
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "rb" ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = DEFAULT_BLOCK_SIZE ,**_lowerCAmelCase ,):
super().__init__(
fo=_lowerCAmelCase ,mode=_lowerCAmelCase ,target_protocol=_lowerCAmelCase ,target_options=_lowerCAmelCase ,block_size=_lowerCAmelCase ,**_lowerCAmelCase ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase__ = self.file.__enter__
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ):
lowerCamelCase__ = file_
def __enter__( self ):
self._file.__enter__()
return self
def __exit__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
self._file.__exit__(*_lowerCAmelCase ,**_lowerCAmelCase )
def __iter__( self ):
return iter(self._file )
def UpperCamelCase_ ( self ):
return next(self._file )
def __getattr__( self ,_lowerCAmelCase ):
return getattr(self._file ,_lowerCAmelCase )
def fixed_enter(*_lowerCAmelCase ,**_lowerCAmelCase ):
return WrappedFile(_enter(*_lowerCAmelCase ,**_lowerCAmelCase ) )
lowerCamelCase__ = fixed_enter
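# Illustrative note (added): each subclass above maps one compression codec to
# an fsspec protocol, presenting the archive as a single-file filesystem whose
# root entry is the decompressed payload. The WrappedFile shim exists only to
# give zstd's read-only decompression reader a patchable `close`/`__enter__`
# surface (see the linked filesystem_spec issue).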
| 707 |
'''simple docstring'''
import argparse
import os
import re
# All paths assume this script is run from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase : Optional[Any] = 'src/diffusers'
# Matches is_xxx_available()
UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
UpperCamelCase : Optional[int] = '\n{0} = None\n'
UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
return "_and_".join(__lowerCAmelCase )
def A__ ( ):
with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
    # Advance to the point where the actual imports for type checking happen
lowerCamelCase__ = 0
lowerCamelCase__ = {}
# Go through the end of the file
while line_index < len(__lowerCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
lowerCamelCase__ = lines[line_index]
lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__lowerCAmelCase ) > 0:
lowerCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
if name.isupper():
return DUMMY_CONSTANT.format(__lowerCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
else:
return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Optional[int]=None ):
if backend_specific_objects is None:
lowerCamelCase__ = read_init()
    # Special-case mapping from backend name to module name, as used in the requires_* helpers
lowerCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
lowerCamelCase__ = dummy_file
return dummy_files
def A__ ( __lowerCAmelCase : List[str]=False ):
lowerCamelCase__ = create_dummy_files()
    # Special-case mapping from backend name to the shortcut used in utils/dummy_xxx_objects.py
lowerCamelCase__ = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" )
lowerCamelCase__ = {
backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase : Any = parser.parse_args()
check_dummies(args.fix_and_overwrite)
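# Illustrative usage (added): run from the repository root, as the header
# comment says.
#   python utils/check_dummies.py                      # fail if dummies are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate dummy files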
| 9 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : int = 50 ):
lowerCamelCase__ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
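# Illustrative check (added; self-contained): the counts follow the tetranacci
# recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) (Project Euler 117).
def _tetranacci_ways(n: int) -> int:
    f = [1, 1, 2, 4]
    for _ in range(4, n + 1):
        f.append(f[-1] + f[-2] + f[-3] + f[-4])
    return f[n]

assert [_tetranacci_ways(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]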
| 708 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = GPTSwaTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """This is a test"""
lowerCamelCase__ = """This is a test"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """<s>"""
lowerCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,20_00 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowerCamelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
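# Illustrative note (added): the "<0x39>", "<0xC3>", "<0xA9>" pieces in the
# expected tokenizations above are SentencePiece byte-fallback tokens: "9" and
# "é" fall back to their raw UTF-8 bytes (0x39 and 0xC3 0xA9) instead of <unk>.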
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] = 2 , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : List[Any] = 3 , ):
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(__lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> int:
return (pow(A_ , 2 ) + step) % modulus
for _ in range(A_ ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(A_ , A_ , A_ )
lowerCamelCase__ = rand_fn(A_ , A_ , A_ )
lowerCamelCase__ = rand_fn(A_ , A_ , A_ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , A_ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
UpperCamelCase : Dict = parser.parse_args()
UpperCamelCase : List[Any] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'{args.num} is probably prime')
else:
UpperCamelCase : Tuple = args.num // divisor
print(F'{args.num} = {divisor} * {quotient}')
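# Illustrative check (added; self-contained): the textbook rho run on
# 8051 = 83 * 97 with f(x) = x*x + 1 and seed 2 finds a factor within a few
# Floyd cycle-detection steps.
from math import gcd as _gcd

def _f(x: int) -> int:
    return (x * x + 1) % 8051

_tortoise = _hare = 2
_d = 1
while _d == 1:
    _tortoise = _f(_tortoise)
    _hare = _f(_f(_hare))
    _d = _gcd(abs(_hare - _tortoise), 8051)
assert _d in (83, 97)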
| 709 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
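# Illustrative usage (added): render the scene above with manim's CLI, e.g.
#   manim -pql this_file.py <SceneClassName>
# (the scene class name is a placeholder; substitute the class defined above)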
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase : Union[str, Any] = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ['''MaskFormerFeatureExtractor''']
UpperCamelCase : Union[str, Any] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
UpperCamelCase : Optional[int] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
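# Illustrative note (added): the `_LazyModule` pattern above defers the heavy
# torch/vision imports until a symbol is first accessed, keeping a plain
# `import transformers` cheap while TYPE_CHECKING still resolves real classes.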
| 710 |
'''simple docstring'''
UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = 0
while number:
        # Increases speed slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Every starting number eventually falls into one of two cycles:
# one that contains 89 (declaring the member 58 first minimizes the number of
# iterations needed to classify all the members) and one whose only element is 1.
# So 58 and 1 are seeded at the start.
# A flat array is used instead of a dictionary to speed up the solution.
UpperCamelCase : list[bool | None] = [None] * 10_00_00_00
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False
def A__ ( __lowerCAmelCase : int ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) )
lowerCamelCase__ = number_chain
while number < 1000_0000:
lowerCamelCase__ = number_chain
number *= 10
return number_chain
def A__ ( __lowerCAmelCase : int = 1000_0000 ):
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
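# Illustrative check (added; self-contained): one step of the square-digit-sum
# map, tracing the Project Euler 92 example chains 44 -> 32 -> 13 -> 10 -> 1
# and 85 -> 89.
def _square_digit_sum(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))

assert _square_digit_sum(44) == 32 and _square_digit_sum(32) == 13
assert _square_digit_sum(13) == 10 and _square_digit_sum(10) == 1
assert _square_digit_sum(85) == 89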
| 9 | 0 |
'''simple docstring'''
# fmt: off
UpperCamelCase : List[str] = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '\"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCamelCase : int = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( __lowerCAmelCase : Optional[int] ):
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( __lowerCAmelCase : Any ):
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ):
lowerCamelCase__ = """Morse code here!"""
print(__snake_case )
lowerCamelCase__ = encrypt(__snake_case )
print(__snake_case )
lowerCamelCase__ = decrypt(__snake_case )
print(__snake_case )
if __name__ == "__main__":
main()
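# Illustrative check (added; self-contained): a round trip of "SOS" through a
# minimal Morse table, mirroring the encrypt/decrypt pair above.
_morse = {"S": "...", "O": "---"}
_reverse = {v: k for k, v in _morse.items()}
_encoded = " ".join(_morse[c] for c in "SOS")
assert _encoded == "... --- ..."
assert "".join(_reverse[c] for c in _encoded.split()) == "SOS"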
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'donut-swin'
_UpperCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(_lowerCAmelCase )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
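# Illustrative note (added): the derived hidden_size above follows Swin's
# convention of doubling the channel dimension at each stage; with the defaults
# embed_dim=96 and 4 stages this gives 96 * 2 ** 3 == 768.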
| 9 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCamelCase__ (lowercase_ ):
'''simple docstring'''
_UpperCamelCase = 42
_UpperCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class UpperCamelCase__ (lowercase_ ):
'''simple docstring'''
_UpperCamelCase = 42
_UpperCamelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
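# Illustrative usage sketch (added; the checkpoint id is the common community
# default, not taken from this file):
# from diffusers import StableDiffusionPipeline
# pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# image = pipe("an astronaut riding a horse").images[0]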
| 712 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase : Optional[Any] = ['small', 'medium', 'large']
UpperCamelCase : Dict = 'lm_head.decoder.weight'
UpperCamelCase : int = 'lm_head.weight'
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
lowerCamelCase__ = torch.load(__lowerCAmelCase )
lowerCamelCase__ = d.pop(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
UpperCamelCase : Dict = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
UpperCamelCase : str = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
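# Illustrative note (added): the key rename above exists because the released
# DialoGPT checkpoints store the tied output projection as
# "lm_head.decoder.weight", while transformers' GPT-2 expects "lm_head.weight".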
| 9 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class UpperCamelCase__ (__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase = 'big_bird'
def __init__( self ,_lowerCAmelCase=5_03_58 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=True ,_lowerCAmelCase=0 ,_lowerCAmelCase=1 ,_lowerCAmelCase=2 ,_lowerCAmelCase=66 ,_lowerCAmelCase="block_sparse" ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=64 ,_lowerCAmelCase=3 ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
pad_token_id=UpperCamelCase_ ,bos_token_id=UpperCamelCase_ ,eos_token_id=UpperCamelCase_ ,sep_token_id=UpperCamelCase_ ,**UpperCamelCase_ ,)
lowerCamelCase__ = vocab_size
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = use_cache
lowerCamelCase__ = rescale_embeddings
lowerCamelCase__ = attention_type
lowerCamelCase__ = use_bias
lowerCamelCase__ = block_size
lowerCamelCase__ = num_random_blocks
lowerCamelCase__ = classifier_dropout
class UpperCamelCase__ (__lowerCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
if self.task == "multiple-choice":
lowerCamelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
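# Illustrative note (added): with attention_type="block_sparse" (the default
# above), block_size and num_random_blocks shape BigBird's sparse attention
# pattern; attention_type="original_full" falls back to dense attention.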
| 713 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = mask_ratio
lowerCamelCase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
# expected sequence length = num_patches
lowerCamelCase__ = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
lowerCamelCase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = outputs_dict[0].numpy()
lowerCamelCase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCAmelCase ):
lowerCamelCase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCAmelCase ):
lowerCamelCase__ = v.numpy()
else:
lowerCamelCase__ = np.array(_lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.constant(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ = tf_noise
super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCAmelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase )
}
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ = main_layer_class(_lowerCAmelCase )
lowerCamelCase__ = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) )
lowerCamelCase__ = model(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" )
model.save(_lowerCAmelCase )
lowerCamelCase__ = tf.keras.models.load_model(
_lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCAmelCase ,tf.keras.Model )
lowerCamelCase__ = model(_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = outputs.last_hidden_state.numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = outputs.logits.numpy()
lowerCamelCase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase )
lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = after_outputs["""logits"""].numpy()
lowerCamelCase__ = 0
lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase ,1E-5 )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_lowerCAmelCase )
lowerCamelCase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ = model_class.from_config(model.config )
lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCamelCase_ ( self ):
pass
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ = ViTMAEConfig()
lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
# verify the logits
lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCamelCase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
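# The sequence length asserted throughout these tests follows from ViTMAE's
# random masking: keep ceil((1 - mask_ratio) * (num_patches + 1)) tokens,
# the +1 being the CLS token. For the tester defaults above:
_image_size, _patch_size, _mask_ratio = 30, 2, 0.6
_num_patches = (_image_size // _patch_size) ** 2  # 225 patches
assert int(math.ceil((1 - _mask_ratio) * (_num_patches + 1))) == 91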
| 9 | 0 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
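# A minimal iterative cross-check of the same recurrence, a(1) = 2 and
# a(k) = a(k-1)**2 - a(k-1) + 1; the first five Sylvester numbers are
# 2, 3, 7, 43, 1807:
def sylvester_iterative(n: int) -> int:
    value = 2
    for _ in range(n - 1):
        value = value * value - value + 1
    return value


assert [sylvester_iterative(k) for k in range(1, 6)] == [2, 3, 7, 43, 1807]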
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = do_center_crop
lowerCamelCase__ = crop_size
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean
lowerCamelCase__ = image_std
def UpperCamelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
lowerCamelCase__ = LevitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,Image.Image )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,np.ndarray )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
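# The {"shortest_edge": 18} size convention exercised above rescales images so
# the short side becomes 18 while preserving aspect ratio; a sketch of that
# arithmetic (the rounding rule here is illustrative, not the processor's exact one):
def _resized_shape(height: int, width: int, shortest_edge: int = 18) -> tuple:
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)


assert _resized_shape(30, 400) == (18, 240)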
| 9 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
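# For intuition, the in-place update above is the standard LoRA merge,
# W' = W + alpha * (up @ down); a self-contained sketch on plain tensors
# (names here are illustrative, not part of the script):
def _merge_lora(weight, up, down, alpha):
    return weight + alpha * torch.mm(up, down)


_w = torch.zeros(4, 4)
_merged = _merge_lora(_w, torch.randn(4, 2), torch.randn(2, 4), alpha=0.75)
assert _merged.shape == _w.shape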
| 715 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
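# The per-parameter loop above is equivalent to one vectorized batch update,
# theta <- theta - lr * (1/m) * X^T (X theta - y), assuming a bias column of
# ones is prepended to X (a sketch for intuition, not used by the module):
def gradient_descent_step(theta, x, y, lr=LEARNING_RATE):
    predictions = x @ theta
    return theta - lr * (x.T @ (predictions - y)) / len(y)


_x = numpy.array([[1.0, 5, 2, 3], [1.0, 6, 5, 9]])  # leading 1s = bias feature
_y = numpy.array([15.0, 25.0])
_theta = gradient_descent_step(numpy.array([2.0, 4, 1, 5]), _x, _y)
assert _theta.shape == (4,)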
| 9 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
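# The dispatch idiom above in miniature: each sub-command parser attaches a
# callable via set_defaults(func=...), and main() simply invokes args.func(args).
# `hello` below is a made-up command for illustration:
def _demo_parser():
    parser = ArgumentParser("demo")
    subparsers = parser.add_subparsers(help="demo command helpers")
    hello = subparsers.add_parser("hello")
    hello.set_defaults(func=lambda args: print("hello"))
    return parser


# Usage sketch (not executed here):
#     args = _demo_parser().parse_args(["hello"])
#     args.func(args)  # prints "hello"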
| 716 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
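# The prefix-filtering idiom used twice in convert_ldm_original, in isolation;
# a dict comprehension is an equivalent compact form:
_sd = {"first_stage_model.encoder.w": 1, "model.diffusion_model.mid.w": 2}
_prefix = "first_stage_model."
_sub = {k[len(_prefix):]: v for k, v in _sd.items() if k.startswith(_prefix)}
assert _sub == {"encoder.w": 1}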
| 9 | 0 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
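# A minimal node type and smoke test for the checkers above; the module
# assumes list nodes expose `.val` and `.next`:
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def _build(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


assert is_palindrome_stack(_build([1, 2, 2, 1])) is True
assert is_palindrome_dict(_build([1, 2, 3])) is False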
| 717 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
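# The symmetry solution() relies on, in one line: XOR-ing with the same key
# twice recovers the plaintext, so try_key can both decode and validate:
_plain = b"the quick brown fox"
_key = (ord("a"), ord("b"), ord("c"))
_cipher = [c ^ k for c, k in zip(_plain, cycle(_key))]
assert bytes(c ^ k for c, k in zip(_cipher, cycle(_key))) == _plain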
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa:
'''simple docstring'''
    def __init__(self, data):
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
# Convert into blocks of 64 bytes
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
# Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value, rotations):
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
import hashlib
        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
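# The preprocessing rule above in numbers: append 0x80, zero-fill, then the
# 8-byte big-endian bit length, so the padded message is a multiple of 64
# bytes. For an empty message that is 1 + 55 + 8 = 64 bytes:
_data = b""
_padded = _data + b"\x80" + b"\x00" * (63 - (len(_data) + 8) % 64) + struct.pack(">Q", len(_data) * 8)
assert len(_padded) % 64 == 0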
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase : Optional[int] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class UpperCamelCase__ (__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase = 'ernie_m'
_UpperCamelCase = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}
def __init__( self ,_lowerCAmelCase = 25_00_02 ,_lowerCAmelCase = 7_68 ,_lowerCAmelCase = 12 ,_lowerCAmelCase = 12 ,_lowerCAmelCase = 30_72 ,_lowerCAmelCase = "gelu" ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 5_14 ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 1E-05 ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=0.0 ,**_lowerCAmelCase ,):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = classifier_dropout
lowerCamelCase__ = is_decoder
lowerCamelCase__ = act_dropout
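# Typical round-trip for a PretrainedConfig subclass like this one; the class
# name below is transformers' public ErnieMConfig, standing in for the
# obfuscated class defined above (sketch only, path is illustrative):
#
#     from transformers import ErnieMConfig
#     config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
#     config.save_pretrained("./ernie-m-config")
#     assert ErnieMConfig.from_pretrained("./ernie-m-config").hidden_size == 768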
| 719 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu",
    )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
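# The weight tying performed by make_linear_from_emb, in isolation: the
# lm_head shares storage with the token embedding, so logits = h @ E^T:
_emb = nn.Embedding(10, 4)  # vocab_size=10, emb_size=4
_lm_head = nn.Linear(4, 10, bias=False)
_lm_head.weight = _emb.weight  # shared (10, 4) weight
_h = torch.randn(2, 4)
assert torch.allclose(_lm_head(_h), _h @ _emb.weight.T)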
| 9 | 0 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
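# Quick usage checks for the two-ended recursive search above:
assert search([1, 3, 5, 7, 9], key=7) == 3
assert search([1, 3, 5, 7, 9], key=4) == -1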
| 720 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = BlipImageProcessor()
lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor
def UpperCamelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )
lowerCamelCase__ = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = processor(text=_lowerCAmelCase )
lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
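# The contract these tests pin down, in miniature: a processor fans text out
# to its tokenizer and images to its image processor and merges the results.
# A hypothetical minimal composition (not the real BlipProcessor):
class _MiniProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if images is not None:
            outputs.update(self.image_processor(images))
        if text is not None:
            outputs.update(self.tokenizer(text))
        return outputs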
| 9 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=UpperCAmelCase__ ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = NystromformerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ = model(UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ )
lowerCamelCase__ = model(UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ )
lowerCamelCase__ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = NystromformerForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ = model(UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ ,labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = NystromformerForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ = model(
UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ ,start_positions=UpperCAmelCase__ ,end_positions=UpperCAmelCase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = NystromformerForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ = model(UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ ,labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = NystromformerForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ = model(UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ ,labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = NystromformerForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = model(
UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,token_type_ids=UpperCAmelCase__ ,labels=UpperCAmelCase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = NystromformerModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=UpperCAmelCase__ ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def UpperCamelCase_ ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = NystromformerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
lowerCamelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
lowerCamelCase__ = model(UpperCAmelCase__ )[0]
lowerCamelCase__ = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape ,UpperCAmelCase__ )
lowerCamelCase__ = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase__ ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = '''the [MASK] of Belgium is Brussels'''
lowerCamelCase__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
lowerCamelCase__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
lowerCamelCase__ = tokenizer(UpperCAmelCase__ ,return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase__ = model(encoding.input_ids ).logits
lowerCamelCase__ = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(UpperCAmelCase__ ) ,"""capital""" )
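# Illustrative extra (added, not part of the original test suite): a helper sketch
# showing how the top-k fill-mask candidates for the sentence above could be
# inspected. The checkpoint matches the tests; the helper name and k are assumptions.
def _sketch_top_k_mask_predictions(k=5):
    tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
    model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
    encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
    with torch.no_grad():
        logits = model(encoding.input_ids).logits
    # position 2 is the [MASK] token: [CLS] the [MASK] ...
    return [tokenizer.decode([i]) for i in logits[0, 2].topk(k).indices.tolist()]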
| 721 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
if hor == 128:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 64, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
        json.dump(config, f)
def value_function():
    config = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase__ = model
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
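    # Post-conversion sanity sketch (added; assumes the files written above exist):
    # reload the exported value function to confirm the config and weights round-trip.
    reloaded = UNet1DModel.from_pretrained("hub/hopper-medium-v2/value_function")
    print(f"reloaded value function with {sum(p.numel() for p in reloaded.parameters())} parameters")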
| 9 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
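# Variant sketch (added, not in the original): the same webhook call with an explicit
# timeout so a hung endpoint cannot block the caller. The name and default timeout
# are illustrative assumptions.
def send_slack_message_with_timeout(message_body: str, slack_url: str, timeout: float = 5.0) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=timeout)
    response.raise_for_status()  # raises requests.HTTPError for non-2xx responses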
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 700 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, digit and punctuation pre-tokenization."""

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: str = "<unk>", eos_token: str = "</s>", pad_token: str = "<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
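# Usage sketch (added): train the tokenizer above from an in-memory corpus. The two
# corpus lines and the tiny vocab size are made-up illustration values.
def _sketch_train_from_iterator():
    sketch_tokenizer = SentencePieceUnigramTokenizer()
    sketch_tokenizer.train_from_iterator(["hello world", "hola mundo"], vocab_size=100, show_progress=False)
    return sketch_tokenizer.get_vocab_size()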
| 9 | 0 |
'''simple docstring'''
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
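# Illustrative extra (added): draining the heap returns nodes in ascending val order,
# a quick end-to-end check of build_heap / sift_down / remove defined above.
drained = []
while not my_min_heap.is_empty():
    drained.append(my_min_heap.remove())
print('Drained order:', ', '.join(str(node) for node in drained))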
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
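# Illustrative helper (added, not in the original): decompose an odd composite as
# prime + 2*i*i when possible, e.g. decompose(9) == (7, 1). solution() returns 5777,
# the smallest odd composite admitting no such decomposition.
def decompose(odd_composite: int) -> tuple[int, int] | None:
    i = 1
    while 2 * i * i < odd_composite:
        rem = odd_composite - 2 * i * i
        if is_prime(rem):
            return rem, i
        i += 1
    return None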
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
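# Quick check (added): 4 steps can be climbed 5 ways (1111, 112, 121, 211, 22),
# matching the Fibonacci recurrence implemented by the loop above.
assert climb_stairs(4) == 5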
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
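# Alternative sketch (added, not in the original): Euclid's parameterization m > n > 0
# gives a = m*m - n*n, b = 2*m*n, c = m*m + n*n; scaling the first triple whose
# perimeter divides 1000 reaches the same product without the quadratic scan above.
def solution_fast() -> int:
    for m in range(2, 32):
        for n in range(1, m):
            a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
            perimeter = a + b + c
            if 1000 % perimeter == 0:
                k = 1000 // perimeter
                return (k * a) * (k * b) * (k * c)
    raise RuntimeError("no triplet found below the search bound")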
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
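# Illustrative check (added, not part of the conversion script): apply the PATTERNS
# table by hand to one plausible TF variable name; the sample key is an assumption
# chosen to exercise the attention, projection, separator and kernel rules.
assert (
    rename_state_dict_key("model/decoder/memory_attention/output_proj/kernel")
    == "model.decoder.encoder_attn.out_proj.weight"
)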
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 703 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCamelCase : Dict = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
UpperCamelCase : List[Any] = {
'camembert-base': 5_12,
}
UpperCamelCase : List[str] = '▁'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self ):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        return self.sp_model.encode(_lowerCAmelCase, out_type=str)
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
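# Worked illustration (added): the pair layout built above is <s> A </s></s> B </s>,
# mirroring RoBERTa. Written as a free function over plain id lists; the cls/sep ids
# below are placeholders, not the real vocabulary ids.
def _sketch_pair_layout(ids_a, ids_b, cls_id=5, sep_id=6):
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]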
| 9 | 0 |
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head (discriminator) on top of a transformer encoder."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
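if __name__ == "__main__":
    # Usage sketch (added; sizes are arbitrary): score a batch of pooled hidden states.
    import torch

    head = ClassificationHead(class_size=3, embed_size=8)
    print(head(torch.randn(4, 8)).shape)  # torch.Size([4, 3])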
| 704 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm scale
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
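# Illustrative check (added): rename_key rewrites indexed PyTorch module paths, e.g.
# "layers.0.weight" -> "layers_0.weight". The sample key is made up for demonstration.
assert rename_key("layers.0.weight") == "layers_0.weight"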
| 9 | 0 |
'''simple docstring'''
UpperCamelCase : List[Any] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
UpperCamelCase : Any = frozenset(['prompt', 'negative_prompt'])
UpperCamelCase : Dict = frozenset([])
UpperCamelCase : Optional[int] = frozenset(['image'])
UpperCamelCase : Any = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
UpperCamelCase : int = frozenset(['image'])
UpperCamelCase : Union[str, Any] = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
UpperCamelCase : Union[str, Any] = frozenset(['prompt', 'image', 'negative_prompt'])
UpperCamelCase : Dict = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
UpperCamelCase : Dict = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
UpperCamelCase : List[str] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
UpperCamelCase : Any = frozenset(['image', 'mask_image'])
UpperCamelCase : Tuple = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
UpperCamelCase : List[str] = frozenset(['example_image', 'image', 'mask_image'])
UpperCamelCase : Tuple = frozenset(['class_labels'])
UpperCamelCase : Union[str, Any] = frozenset(['class_labels'])
UpperCamelCase : Dict = frozenset(['batch_size'])
UpperCamelCase : Optional[int] = frozenset([])
UpperCamelCase : Tuple = frozenset(['batch_size'])
UpperCamelCase : Dict = frozenset([])
UpperCamelCase : Optional[Any] = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
UpperCamelCase : Union[str, Any] = frozenset(['prompt', 'negative_prompt'])
UpperCamelCase : Union[str, Any] = frozenset(['input_tokens'])
UpperCamelCase : Dict = frozenset(['input_tokens'])
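# Illustrative helper (added; the function name is a placeholder, not part of the
# original module): these parameter sets can be used to flag keyword arguments that
# a given pipeline signature does not accept.
def _unexpected_params(call_kwargs: dict, allowed: frozenset) -> list:
    return sorted(k for k in call_kwargs if k not in allowed)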
| 705 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
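# Standalone usage sketch (added, mirroring the arguments the tests above exercise):
# run an inference benchmark for a tiny checkpoint outside of unittest. The helper
# name is an assumption for illustration.
def _sketch_run_benchmark():
    benchmark_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    return TensorFlowBenchmark(benchmark_args).run()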
| 9 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = do_center_crop
lowerCamelCase__ = crop_size
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean
lowerCamelCase__ = image_std
def UpperCamelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
lowerCamelCase__ = LevitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(__UpperCamelCase ,"""size""" ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,Image.Image )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(__UpperCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,np.ndarray )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(__UpperCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,torch.Tensor )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(__UpperCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
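# Usage sketch (added, not part of the test suite): preprocess one synthetic PIL
# image with the processor under test; the sizes mirror the defaults exercised above.
def _sketch_preprocess():
    image_processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    image = Image.new("RGB", (32, 32))
    return image_processor(image, return_tensors="pt").pixel_values.shape  # (1, 3, 18, 18)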
| 706 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or
        # the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
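# Worked note (added): 145 maps to itself (1! + 4! + 5! = 145), while
# 169 -> 363601 -> 1454 -> 169 loops through three distinct members; solution()
# counts the chains of length 60 below one million (402 for the Euler 74 data).
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601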
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 9 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase__ (A_ ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
lowerCamelCase__ = feature_size
lowerCamelCase__ = sampling_rate
lowerCamelCase__ = padding_value
lowerCamelCase__ = kwargs.pop("""padding_side""" ,"""right""" )
lowerCamelCase__ = kwargs.pop("""return_attention_mask""" ,_lowerCAmelCase )
super().__init__(**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(_lowerCAmelCase ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
lowerCamelCase__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
lowerCamelCase__ = processed_features[self.model_input_names[0]]
lowerCamelCase__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_lowerCAmelCase ) == 0:
if return_attention_mask:
lowerCamelCase__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowerCamelCase__ = required_input[0]
if isinstance(_lowerCAmelCase ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowerCamelCase__ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_lowerCAmelCase ):
lowerCamelCase__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_lowerCAmelCase ):
lowerCamelCase__ = """tf"""
elif is_torch_tensor(_lowerCAmelCase ):
lowerCamelCase__ = """pt"""
elif isinstance(_lowerCAmelCase ,(int, float, list, tuple, np.ndarray) ):
lowerCamelCase__ = """np"""
else:
raise ValueError(
F'''type of {first_element} unknown: {type(_lowerCAmelCase )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
lowerCamelCase__ = to_numpy(_lowerCAmelCase )
else:
lowerCamelCase__ = [to_numpy(_lowerCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowerCamelCase__ = self._get_padding_strategies(padding=_lowerCAmelCase ,max_length=_lowerCAmelCase )
lowerCamelCase__ = processed_features[self.model_input_names[0]]
lowerCamelCase__ = len(_lowerCAmelCase )
if not all(len(_lowerCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
lowerCamelCase__ = []
for i in range(_lowerCAmelCase ):
lowerCamelCase__ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowerCamelCase__ = self._truncate(
_lowerCAmelCase ,max_length=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,truncation=_lowerCAmelCase ,)
truncated_inputs.append(_lowerCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowerCamelCase__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowerCamelCase__ = PaddingStrategy.MAX_LENGTH
lowerCamelCase__ = {}
for i in range(_lowerCAmelCase ):
# padding
lowerCamelCase__ = self._pad(
truncated_inputs[i] ,max_length=_lowerCAmelCase ,padding_strategy=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,)
for key, value in outputs.items():
if key not in batch_outputs:
lowerCamelCase__ = []
if value.dtype is np.dtype(np.floataa ):
lowerCamelCase__ = value.astype(np.floataa )
batch_outputs[key].append(_lowerCAmelCase )
return BatchFeature(_lowerCAmelCase ,tensor_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = PaddingStrategy.DO_NOT_PAD ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,):
lowerCamelCase__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowerCamelCase__ = len(_lowerCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCamelCase__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCamelCase__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_lowerCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowerCamelCase__ = np.ones(len(_lowerCAmelCase ) ,dtype=np.intaa )
if needs_to_be_padded:
lowerCamelCase__ = max_length - len(_lowerCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
lowerCamelCase__ = np.pad(
processed_features["""attention_mask"""] ,(0, difference) )
lowerCamelCase__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowerCamelCase__ = np.pad(
_lowerCAmelCase ,_lowerCAmelCase ,"""constant""" ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowerCamelCase__ = np.pad(
processed_features["""attention_mask"""] ,(difference, 0) )
lowerCamelCase__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowerCamelCase__ = np.pad(
_lowerCAmelCase ,_lowerCAmelCase ,"""constant""" ,constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
lowerCamelCase__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCamelCase__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCamelCase__ = len(_lowerCAmelCase ) > max_length
if needs_to_be_truncated:
lowerCamelCase__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowerCamelCase__ = processed_features["""attention_mask"""][:max_length]
return processed_features
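# Rounding example: max_length=4 with pad_to_multiple_of=3 is bumped to
# ((4 // 3) + 1) * 3 = 6, so only inputs longer than 6 elements are truncated.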
def UpperCamelCase_ ( self ,_lowerCAmelCase=False ,_lowerCAmelCase=None ):
# Get padding strategy
if padding is not False:
if padding is True:
lowerCamelCase__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = PaddingStrategy(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = padding
else:
lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
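# In short: padding=True maps to LONGEST, padding="max_length" to MAX_LENGTH
# (which additionally requires max_length to be set), and padding=False to DO_NOT_PAD.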
| 707 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase : Optional[Any] = 'src/diffusers'
# Matches is_xxx_available()
UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
UpperCamelCase : Optional[int] = '\n{0} = None\n'
UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
return "_and_".join(__lowerCAmelCase )
def A__ ( ):
with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase__ = 0
lowerCamelCase__ = {}
# Go through the end of the file
while line_index < len(__lowerCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
lowerCamelCase__ = lines[line_index]
lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__lowerCAmelCase ) > 0:
lowerCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
if name.isupper():
return DUMMY_CONSTANT.format(__lowerCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
else:
return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )
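# e.g. an all-uppercase name renders DUMMY_CONSTANT, a lowercase function name
# renders DUMMY_FUNCTION, and a class name such as "UNet2DModel" renders the
# DUMMY_CLASS template whose methods all raise via requires_backends.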
def A__ ( __lowerCAmelCase : Optional[int]=None ):
if backend_specific_objects is None:
lowerCamelCase__ = read_init()
# Build the content of one dummy file per backend (matching the requires_<modulename> checks)
lowerCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
lowerCamelCase__ = dummy_file
return dummy_files
def A__ ( __lowerCAmelCase : List[str]=False ):
lowerCamelCase__ = create_dummy_files()
# Map each backend to the shortcut used in the utils/dummy_xxx_objects.py file names
lowerCamelCase__ = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" )
lowerCamelCase__ = {
backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase : Any = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 9 | 0 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase__ (__A ):
'''simple docstring'''
_UpperCamelCase = (DPMSolverSDEScheduler,)
_UpperCamelCase = 10
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = {
'num_train_timesteps': 11_00,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**UpperCamelCase__ )
return config
def UpperCamelCase_ ( self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCamelCase_ ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ ,beta_end=UpperCamelCase__ )
def UpperCamelCase_ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def UpperCamelCase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase__ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = scheduler.scale_model_input(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = model(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase__ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = scheduler.scale_model_input(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = model(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps ,device=UpperCamelCase__ )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter.to(UpperCamelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase__ = scheduler.scale_model_input(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = model(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ ,use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps ,device=UpperCamelCase__ )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter.to(UpperCamelCase__ ) * scheduler.init_noise_sigma
lowerCamelCase__ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
lowerCamelCase__ = scheduler.scale_model_input(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = model(UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 708 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = GPTSwaTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """This is a test"""
lowerCamelCase__ = """This is a test"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """<s>"""
lowerCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,20_00 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowerCamelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def A__ ( __lowerCAmelCase : Tuple ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
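# Sanity checks: is_prime(2) and is_prime(97) are True, while is_prime(1),
# is_prime(9) and is_prime(91) (= 7 * 13) are False.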
UpperCamelCase : Any = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def A__ ( __lowerCAmelCase : Union[str, Any] ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
lowerCamelCase__ = []
for num in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = 0
while 2 * i * i <= odd_composites[num]:
lowerCamelCase__ = odd_composites[num] - 2 * i * i
if is_prime(__lowerCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__lowerCAmelCase ) == n:
return list_nums
return []
def A__ ( ):
return compute_nums(1 )[0]
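# Project Euler 46 ("Goldbach's other conjecture"): the smallest odd composite
# that cannot be written as a prime plus twice a square, i.e. solution() == 5777.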
if __name__ == "__main__":
print(F'{solution() = }')
| 709 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
| 9 | 0 |
'''simple docstring'''
UpperCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n'
UpperCamelCase : Optional[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase : Tuple = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 710 |
'''simple docstring'''
UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = 0
while number:
# Slightly faster: the precomputed table consumes five digits per step.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
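# e.g. next_number(44) == 4**2 + 4**2 == 32 and next_number(85) == 8**2 + 5**2 == 89.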
# Every starting number ends up in one of two chains:
# one ends in 89 (58 is an early member of it, so seeding 58 first minimizes
# the iterations needed to classify the remaining numbers),
# the other ends in 1 and contains only the single element 1.
# Hence 58 and 1 are seeded at the start.
# Changed dictionary to an array to quicken the solution
UpperCamelCase : list[bool | None] = [None] * 10_00_00_00
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False
def A__ ( __lowerCAmelCase : int ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) )
lowerCamelCase__ = number_chain
while number < 1000_0000:
lowerCamelCase__ = number_chain
number *= 10
return number_chain
def A__ ( __lowerCAmelCase : int = 1000_0000 ):
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ (__UpperCAmelCase ,unittest.TestCase ):
_UpperCamelCase = BioGptTokenizer
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCamelCase__ = dict(zip(lowerCAmelCase_ ,range(len(lowerCAmelCase_ ) ) ) )
lowerCamelCase__ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) )
with open(self.merges_file ,"""w""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BioGptTokenizer(self.vocab_file ,self.merges_file )
lowerCamelCase__ = """lower"""
lowerCamelCase__ = ["""low""", """er</w>"""]
lowerCamelCase__ = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ ,lowerCAmelCase_ )
lowerCamelCase__ = tokens + ["""<unk>"""]
lowerCamelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) ,lowerCAmelCase_ )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=lowerCAmelCase_ )
lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=lowerCAmelCase_ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ,lowerCAmelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'donut-swin'
_UpperCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(_lowerCAmelCase )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
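# e.g. with the default embed_dim=96 and 4 stages, hidden_size = 96 * 2**3 = 768.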
| 9 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : float , __lowerCAmelCase : float ):
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
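# Rough example: water (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa) gives
# (2.15e9 / 998) ** 0.5 ≈ 1468 m/s.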
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase : Optional[Any] = ['small', 'medium', 'large']
UpperCamelCase : Dict = 'lm_head.decoder.weight'
UpperCamelCase : int = 'lm_head.weight'
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
lowerCamelCase__ = torch.load(__lowerCAmelCase )
lowerCamelCase__ = d.pop(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
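# The only transformation needed: DialoGPT checkpoints store the LM head under
# "lm_head.decoder.weight", which is renamed to "lm_head.weight" before saving
# under the transformers layout (WEIGHTS_NAME, i.e. pytorch_model.bin).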
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
UpperCamelCase : Dict = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
UpperCamelCase : str = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 9 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ):
assert x is not None
assert y is not None
lowerCamelCase__ = len(UpperCAmelCase__ )
lowerCamelCase__ = len(UpperCAmelCase__ )
# declaring the array for storing the dp values
lowerCamelCase__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
lowerCamelCase__ = 1 if x[i - 1] == y[j - 1] else 0
lowerCamelCase__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
lowerCamelCase__ = """"""
lowerCamelCase__ , lowerCamelCase__ = m, n
while i > 0 and j > 0:
lowerCamelCase__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
lowerCamelCase__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
UpperCamelCase : List[Any] = 'AGGTAB'
UpperCamelCase : List[str] = 'GXTXAYB'
UpperCamelCase : int = 4
UpperCamelCase : Union[str, Any] = 'GTAB'
UpperCamelCase , UpperCamelCase : Any = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = mask_ratio
lowerCamelCase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
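# With the defaults above: num_patches = (30 // 2) ** 2 = 225 and
# seq_length = ceil((1 - 0.6) * 226) = 91 visible tokens per image.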
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
# expected sequence length = num_patches
lowerCamelCase__ = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
lowerCamelCase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = outputs_dict[0].numpy()
lowerCamelCase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCAmelCase ):
lowerCamelCase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCAmelCase ):
lowerCamelCase__ = v.numpy()
else:
lowerCamelCase__ = np.array(_lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.constant(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ = tf_noise
super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCAmelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase )
}
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ = main_layer_class(_lowerCAmelCase )
lowerCamelCase__ = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) )
lowerCamelCase__ = model(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" )
model.save(_lowerCAmelCase )
lowerCamelCase__ = tf.keras.models.load_model(
_lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCAmelCase ,tf.keras.Model )
lowerCamelCase__ = model(_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = outputs.last_hidden_state.numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = outputs.logits.numpy()
lowerCamelCase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase )
lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ = 0
else:
lowerCamelCase__ = after_outputs["""logits"""].numpy()
lowerCamelCase__ = 0
lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase ,1E-5 )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_lowerCAmelCase )
lowerCamelCase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ = model_class.from_config(model.config )
lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCamelCase_ ( self ):
pass
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ = ViTMAEConfig()
lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
# verify the logits
lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCamelCase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
| 9 | 0 |
def A__ ( __lowerCAmelCase : list ):
if len(UpperCAmelCase__ ) <= 1:
return [tuple(UpperCAmelCase__ )]
lowerCamelCase__ = []
def generate(__lowerCAmelCase : int , __lowerCAmelCase : list ):
lowerCamelCase__ = [0] * n
res.append(tuple(UpperCAmelCase__ ) )
lowerCamelCase__ = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
lowerCamelCase__ , lowerCamelCase__ = arr[i], arr[0]
else:
lowerCamelCase__ , lowerCamelCase__ = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase__ ) )
c[i] += 1
lowerCamelCase__ = 0
else:
lowerCamelCase__ = 0
i += 1
generate(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
return res
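# Heap's algorithm emits each of the n! permutations exactly once, swapping a
# single pair of elements between consecutive permutations.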
if __name__ == "__main__":
UpperCamelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = do_center_crop
lowerCamelCase__ = crop_size
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean
lowerCamelCase__ = image_std
def UpperCamelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
lowerCamelCase__ = LevitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,Image.Image )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,np.ndarray )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCamelCase_ ( self ):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 9 | 0 |
from math import isqrt
def A__ ( __lowerCAmelCase : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(_UpperCamelCase ) + 1 ) )
def A__ ( __lowerCAmelCase : int = 10**6 ):
lowerCamelCase__ = 0
lowerCamelCase__ = 1
lowerCamelCase__ = 7
while prime_candidate < max_prime:
primes_count += is_prime(_UpperCamelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
| 715 |
'''simple docstring'''
import numpy
# List of input, output pairs
UpperCamelCase : List[Any] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
UpperCamelCase : int = [2, 4, 1, 5]
UpperCamelCase : int = len(train_data)
UpperCamelCase : Dict = 0.009
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ):
return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output(
__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Any ):
lowerCamelCase__ = 0
for i in range(len(__lowerCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ):
lowerCamelCase__ = 0
for i in range(__lowerCAmelCase ):
if index == -1:
summation_value += _error(__lowerCAmelCase )
else:
summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index]
return summation_value
def A__ ( __lowerCAmelCase : List[Any] ):
lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m
return cost_derivative_value
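# The update performed below is plain batch gradient descent:
# theta[i] <- theta[i] - LEARNING_RATE * (1/m) * sum_j error_j * x_j[i]
# (index -1 sums bare errors, i.e. the bias term theta[0]).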
def A__ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase__ = 0.00_0002
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while True:
j += 1
lowerCamelCase__ = [0, 0, 0, 0]
for i in range(0 , len(__lowerCAmelCase ) ):
lowerCamelCase__ = get_cost_derivative(i - 1 )
lowerCamelCase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ):
break
lowerCamelCase__ = temp_parameter_vector
print(("""Number of iterations:""", j) )
def A__ ( ):
for i in range(len(__lowerCAmelCase ) ):
print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 9 | 0 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
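

# Added note: the iteration above is x_{n+1} = x_n - m * f(x_n) / f'(x_n); when
# multiplicity m matches the root's multiplicity, convergence stays quadratic
# even on repeated roots. A quick sketch (my own example, not from the original):
#   newton_raphson("(x - 3) ** 2", 10, multiplicity=2)  # converges to ~3.0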
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 716 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
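    # Example invocation (added for illustration; paths are placeholders):
    #   python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
    #       --config_path config.yaml --output_path ./ldm_pipeline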
| 9 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ["MobileViTFeatureExtractor"]
UpperCamelCase : Optional[Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
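    # Note (added commentary): the module replaces itself in sys.modules with a
    # _LazyModule, so the heavy torch/TF submodules above are only imported on
    # first attribute access; the TYPE_CHECKING branch keeps static analyzers
    # aware of the real symbols.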
| 717 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
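

# Added note: XOR is its own inverse (cipherchar ^ keychar ^ keychar == cipherchar),
# so try_key "decrypts" with exactly the same operation used to encrypt; cycling a
# three-letter lowercase key over the ciphertext is all the cipher amounts to.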
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
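    # Note (added commentary): the two helpers above exercise both conversion
    # directions; convert_pytorch_state_dict_to_flax and
    # load_flax_weights_in_pytorch_model are the same utilities transformers
    # uses internally for from_pt=True / from_flax=True loading.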
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    # Class to contain the entire pipeline for the SHA-256 hashing algorithm
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
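    # Padding sketch (added commentary): the message is extended with one 0x80
    # byte, then zero bytes until its length is 56 (mod 64), and finally the
    # original bit-length as a big-endian 64-bit integer, so final_hash always
    # receives whole 64-byte blocks.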
    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00

                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x1_00_00_00_00,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x1_00_00_00_00,
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 9 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)
    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
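

# Usage sketch (added for illustration): the watermarker expects image tensors
# scaled to [-1, 1] with shape (batch, channels, height, width), e.g.
#   images = StableDiffusionXLWatermarker().apply_watermark(images)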
| 719 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
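    # Example invocation (added for illustration; script name and paths are
    # placeholders):
    #   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100_converted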
| 9 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 720 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 721 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def A__ ( __lowerCAmelCase : Union[str, Any] ):
if hor == 128:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 64, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase__ = model
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 9 | 0 |
UpperCamelCase : List[str] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase : str = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 700 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 701 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import re
import subprocess
import sys
UpperCamelCase : List[Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
UpperCamelCase : List[Any] = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()
UpperCamelCase : List[Any] = '|'.join(sys.argv[1:])
UpperCamelCase : str = re.compile(rF'^({joined_dirs}).*?\.py$')
UpperCamelCase : Any = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 702 |
'''simple docstring'''
def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
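

# Added note: the filter works because a + b + c == 1000 fixes c = 1000 - a - b,
# so a single pass over (a, b) pairs with b >= a finds the unique Pythagorean
# triplet and returns its product.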
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
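

# Note (added commentary): CamemBERT's fairseq vocabulary reserves ids 0-3 for
# its own special tokens, so raw sentencepiece ids are shifted by
# fairseq_offset when converting between tokens and ids in the class below.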
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
lowerCamelCase__ = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = []
lowerCamelCase__ = """"""
lowerCamelCase__ = False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self ,_lowerCAmelCase ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase ,"""wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
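# Minimal usage sketch (checkpoint access and the public class name are assumed):
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tok("J'aime le camembert !")["input_ids"]
#   text = tok.decode(ids)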
| 703 |
| 9 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
return [sentence[i : i + ngram_size] for i in range(len(__lowerCAmelCase ) - ngram_size + 1 )]
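# Example: character n-grams — with sentence = "hello" and ngram_size = 3 this
# returns ['hel', 'ell', 'llo'].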
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = R"""\w+[.]\d+"""
lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase )
for pat in pats:
lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) )
return key
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
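        # PyTorch stores conv kernels as (out_ch, in_ch, h, w); Flax expects (h, w, in_ch, out_ch)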
lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
lowerCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )
lowerCamelCase__ = flatten_dict(__lowerCAmelCase )
lowerCamelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weights so that a warning is thrown
lowerCamelCase__ = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
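# Usage sketch (public names assumed): given a PyTorch state dict and a matching Flax
# module, the converter above is typically invoked as
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)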
| 9 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'lilt'
def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=None ,_lowerCAmelCase=4 ,_lowerCAmelCase=10_24 ,**_lowerCAmelCase ,):
super().__init__(pad_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = classifier_dropout
lowerCamelCase__ = channel_shrink_ratio
lowerCamelCase__ = max_ad_position_embeddings
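# Usage sketch (downstream model class assumed): build a configuration, then a model, e.g.
#   config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#   model = LiltModel(config)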
| 705 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
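# These cases follow standard pytest discovery; a typical invocation (path assumed) is
#   pytest tests/benchmark/test_benchmark_tf.py -k "inference"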
| 9 | 0 |
| 706 |
'''simple docstring'''
from math import factorial
UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Convert the number to a string to iterate over its digits and sum their factorials.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
lowerCamelCase__ = 0
# the cached sizes of the previous chains
lowerCamelCase__ = {}
for start_chain_element in range(1 , __lowerCAmelCase ):
# The temporary set will contain the elements of the chain
lowerCamelCase__ = set()
lowerCamelCase__ = 0
        # Stop computing the chain when we find a cached size, a repeating item, or the
        # length grows beyond the desired one.
lowerCamelCase__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__lowerCAmelCase )
chain_set_length += 1
lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase__ = chain_set_length
        # If the chain contains the exact number of elements, increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
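# For reference, Project Euler 74 gives 402 chains of length exactly 60 among
# starting numbers below one million.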
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 9 | 0 |
'''simple docstring'''
UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase : List[Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 707 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase : Optional[Any] = 'src/diffusers'
# Matches is_xxx_available()
UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
UpperCamelCase : Optional[int] = '\n{0} = None\n'
UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
return "_and_".join(__lowerCAmelCase )
def A__ ( ):
with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase__ = 0
lowerCamelCase__ = {}
# Go through the end of the file
while line_index < len(__lowerCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
lowerCamelCase__ = lines[line_index]
lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__lowerCAmelCase ) > 0:
lowerCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
if name.isupper():
return DUMMY_CONSTANT.format(__lowerCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
else:
return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )
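# For example (illustrative values): create_dummy_object("UNet2DModel", '["torch"]')
# fills the DUMMY_CLASS template, yielding a placeholder class whose constructor
# calls requires_backends(self, ["torch"]).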
def A__ ( __lowerCAmelCase : Optional[int]=None ):
if backend_specific_objects is None:
lowerCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
lowerCamelCase__ = dummy_file
return dummy_files
def A__ ( __lowerCAmelCase : List[str]=False ):
lowerCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase__ = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" )
lowerCamelCase__ = {
backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase : Any = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 9 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase : List[Any] = logging.getLogger(__name__)
@dataclass(frozen=a )
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
@dataclass(frozen=a )
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = 42
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 42
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase=False ,_lowerCAmelCase = False ,):
lowerCamelCase__ = hans_processors[task]()
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(_lowerCAmelCase ) ,_lowerCAmelCase ,) ,)
lowerCamelCase__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
lowerCamelCase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
lowerCamelCase__ = torch.load(_lowerCAmelCase )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
lowerCamelCase__ = (
processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
)
logger.info("""Training examples: %s""" ,len(_lowerCAmelCase ) )
lowerCamelCase__ = hans_convert_examples_to_features(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
logger.info("""Saving features into cached file %s""" ,_lowerCAmelCase )
torch.save(self.features ,_lowerCAmelCase )
def __len__( self ):
return len(self.features )
def __getitem__( self ,_lowerCAmelCase ):
return self.features[i]
def UpperCamelCase_ ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = 42
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 1_28 ,_lowerCAmelCase=False ,_lowerCAmelCase = False ,):
lowerCamelCase__ = hans_processors[task]()
lowerCamelCase__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
lowerCamelCase__ = label_list
lowerCamelCase__ = processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
lowerCamelCase__ = hans_convert_examples_to_features(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase__ = tf.data.Dataset.from_generator(
_lowerCAmelCase ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def UpperCamelCase_ ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,_lowerCAmelCase ):
return self.features[i]
def UpperCamelCase_ ( self ):
return self.label_list
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def UpperCamelCase_ ( self ):
return ["contradiction", "entailment", "neutral"]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = []
for i, line in enumerate(_lowerCAmelCase ):
if i == 0:
continue
lowerCamelCase__ = """%s-%s""" % (set_type, line[0])
lowerCamelCase__ = line[5]
lowerCamelCase__ = line[6]
lowerCamelCase__ = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowerCamelCase__ = line[0]
examples.append(InputExample(guid=_lowerCAmelCase ,text_a=_lowerCAmelCase ,text_b=_lowerCAmelCase ,label=_lowerCAmelCase ,pairID=_lowerCAmelCase ) )
return examples
def A__ ( __lowerCAmelCase : List[InputExample] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : PreTrainedTokenizer , ):
lowerCamelCase__ = {label: i for i, label in enumerate(__lowerCAmelCase )}
lowerCamelCase__ = []
for ex_index, example in tqdm.tqdm(enumerate(__lowerCAmelCase ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowerCamelCase__ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , truncation=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , )
lowerCamelCase__ = label_map[example.label] if example.label in label_map else 0
lowerCamelCase__ = int(example.pairID )
features.append(InputFeatures(**__lowerCAmelCase , label=__lowerCAmelCase , pairID=__lowerCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
UpperCamelCase : List[Any] = {
'hans': 3,
}
UpperCamelCase : Tuple = {
'hans': HansProcessor,
}
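# Usage sketch (paths and public names assumed):
#   processor = HansProcessor()
#   examples = processor.get_dev_examples("/path/to/hans")
#   features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)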
| 708 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = GPTSwaTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """This is a test"""
lowerCamelCase__ = """This is a test"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """<s>"""
lowerCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,20_00 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowerCamelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
| 9 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def A__ ( __lowerCAmelCase : int = 100 ):
lowerCamelCase__ = factorial(__lowerCAmelCase )
lowerCamelCase__ = split_and_add(__lowerCAmelCase )
return result
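# For reference, the digit sum of 100! is 648 (Project Euler 20).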
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 709 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
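# Render sketch (command assumed): save this scene to a file and run
#   manim -pql <this_file>.py <SceneClassName>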
| 9 | 0 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCamelCase : Optional[Any] = NewType('DataClass', Any)
UpperCamelCase : Dict = NewType('DataClassType', Any)
def A__ ( __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
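# Example: "yes"/"true"/"t"/"y"/"1" (any case) parse to True, "no"/"false"/"f"/"n"/"0"
# to False; any other string raises ArgumentTypeError.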
def A__ ( __lowerCAmelCase ):
lowerCamelCase__ = {str(__lowerCAmelCase ): choice for choice in choices}
return lambda __lowerCAmelCase : str_to_choice.get(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( *,
__lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = dataclasses.MISSING , __lowerCAmelCase = dataclasses.MISSING , __lowerCAmelCase = None , **__lowerCAmelCase , ):
if metadata is None:
        # Important: don't use a mutable dict as a default parameter in the signature, since it would be shared across calls
lowerCamelCase__ = {}
if aliases is not None:
lowerCamelCase__ = aliases
if help is not None:
lowerCamelCase__ = help
return dataclasses.field(metadata=__lowerCAmelCase , default=__lowerCAmelCase , default_factory=__lowerCAmelCase , **__lowerCAmelCase )
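# Usage sketch (helper name assumed): the field wrapper above is typically used inside a
# dataclass consumed by the parser below, e.g.
#   learning_rate: float = HfArg(default=3e-5, aliases=["--lr"], help="Learning rate")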
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 42
def __init__( self ,_lowerCAmelCase ,**_lowerCAmelCase ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCamelCase__ = ArgumentDefaultsHelpFormatter
super().__init__(**_lowerCAmelCase )
if dataclasses.is_dataclass(_lowerCAmelCase ):
lowerCamelCase__ = [dataclass_types]
lowerCamelCase__ = list(_lowerCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_lowerCAmelCase )
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = F'''--{field.name}'''
lowerCamelCase__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type ,_lowerCAmelCase ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
lowerCamelCase__ = kwargs.pop("""aliases""" ,[] )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [aliases]
lowerCamelCase__ = getattr(field.type ,"""__origin__""" ,field.type )
if origin_type is Union or (hasattr(_lowerCAmelCase ,"""UnionType""" ) and isinstance(_lowerCAmelCase ,types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
F''' Problem encountered in field \'{field.name}\'.''' )
if type(_lowerCAmelCase ) not in field.type.__args__:
# filter `str` in Union
lowerCamelCase__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCamelCase__ = getattr(field.type ,"""__origin__""" ,field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCamelCase__ = (
field.type.__args__[0] if isinstance(_lowerCAmelCase ,field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCamelCase__ = getattr(field.type ,"""__origin__""" ,field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCamelCase__ = {}
if origin_type is Literal or (isinstance(field.type ,_lowerCAmelCase ) and issubclass(field.type ,_lowerCAmelCase )):
if origin_type is Literal:
lowerCamelCase__ = field.type.__args__
else:
lowerCamelCase__ = [x.value for x in field.type]
lowerCamelCase__ = make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
lowerCamelCase__ = field.default
else:
lowerCamelCase__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCamelCase__ = copy(_lowerCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
lowerCamelCase__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# The default is False when a bool field specifies no explicit default.
lowerCamelCase__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCamelCase__ = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCamelCase__ = """?"""
# This is the value that will get picked if we do --field_name (without value)
lowerCamelCase__ = True
elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = field.type.__args__[0]
lowerCamelCase__ = """+"""
if field.default_factory is not dataclasses.MISSING:
lowerCamelCase__ = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCamelCase__ = True
else:
lowerCamelCase__ = field.type
if field.default is not dataclasses.MISSING:
lowerCamelCase__ = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCamelCase__ = field.default_factory()
else:
lowerCamelCase__ = True
parser.add_argument(_lowerCAmelCase ,*_lowerCAmelCase ,**_lowerCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCamelCase__ = False
parser.add_argument(F'''--no_{field.name}''' ,action="""store_false""" ,dest=field.name ,**_lowerCAmelCase )
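# e.g. a field `use_cache: bool = True` yields both `--use_cache` (default True)
# and `--no_use_cache` (`store_false` into the same `dest`), so the flag can be
# switched off from the command line; the complement is only generated when the
# default is True. (The field name here is illustrative.)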
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if hasattr(_lowerCAmelCase ,"""_argument_group_name""" ):
lowerCamelCase__ = self.add_argument_group(dtype._argument_group_name )
else:
lowerCamelCase__ = self
try:
lowerCamelCase__ = get_type_hints(_lowerCAmelCase )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ):
lowerCamelCase__ = """.""".join(map(_lowerCAmelCase ,sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(_lowerCAmelCase ):
if not field.init:
continue
lowerCamelCase__ = type_hints[field.name]
self._parse_dataclass_field(_lowerCAmelCase ,_lowerCAmelCase )
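# Note: resolving annotations with `get_type_hints` (rather than trusting the
# raw `field.type` strings) is what keeps the parser working when a module uses
# `from __future__ import annotations`; the two except blocks above turn the
# common failure modes into actionable error messages.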
def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase__ = []
if args_filename:
args_files.append(Path(_lowerCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase__ = ArgumentParser()
args_file_parser.add_argument(_lowerCAmelCase ,type=_lowerCAmelCase ,action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase__ , lowerCamelCase__ = args_file_parser.parse_known_args(args=_lowerCAmelCase )
lowerCamelCase__ = vars(_lowerCAmelCase ).get(args_file_flag.lstrip("""-""" ) ,_lowerCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] )
lowerCamelCase__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase__ = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase__ , lowerCamelCase__ = self.parse_known_args(args=_lowerCAmelCase )
lowerCamelCase__ = []
for dtype in self.dataclass_types:
lowerCamelCase__ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
lowerCamelCase__ = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys}
for k in keys:
delattr(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_lowerCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
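# A hedged end-to-end sketch (the class mirrors `transformers.HfArgumentParser`;
# upstream names are used and the dataclass is illustrative only):
#
# @dataclasses.dataclass
# class TrainingArgs:
#     learning_rate: float = 3e-4
#     fp16: bool = True
#
# parser = HfArgumentParser(TrainingArgs)
# (training_args,) = parser.parse_args_into_dataclasses(
#     ["--learning_rate", "1e-3", "--no_fp16"]
# )
# # training_args.learning_rate == 0.001, training_args.fp16 is False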
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
lowerCamelCase__ = set(args.keys() )
lowerCamelCase__ = []
for dtype in self.dataclass_types:
lowerCamelCase__ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
lowerCamelCase__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase__ = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(_lowerCAmelCase )}''' )
return tuple(_lowerCAmelCase )
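# Sketch: `parser.parse_dict({"learning_rate": 1e-3})` returns one instance per
# registered dataclass type; with `allow_extra_keys=False` (the default) any
# leftover key raises, which catches typos in programmatic configs.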
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
with open(Path(_lowerCAmelCase ) ,encoding="""utf-8""" ) as open_json_file:
lowerCamelCase__ = json.loads(open_json_file.read() )
lowerCamelCase__ = self.parse_dict(_lowerCAmelCase ,allow_extra_keys=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
lowerCamelCase__ = self.parse_dict(yaml.safe_load(Path(_lowerCAmelCase ).read_text() ) ,allow_extra_keys=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
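# Both file-based entry points above reduce to `parse_dict`: a JSON file such as
# {"learning_rate": 0.001, "fp16": true} and the equivalent YAML document
# produce the same dataclass instances, with the same `allow_extra_keys`
# behaviour.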
| 710 |
'''simple docstring'''
UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = 0
while number:
# Increased speed slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
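# Worked example: next_number(44) == 4**2 + 4**2 == 32. Because the
# 100_000-entry lookup table handles five digits per iteration, e.g.
# next_number(1_234_567) needs only two table hits:
# DIGITS_SQUARED[34567] + DIGITS_SQUARED[12].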
# Every chain eventually settles on one of two cycles:
# one cycle contains 89 (and its companions); the other contains only 1.
# Declaring 58, a member of the 89 cycle, first minimises the number of
# iterations needed to classify all the remaining members, and 1 covers the
# other cycle, so 58 and 1 are declared before the search starts.
# A flat array (instead of a dictionary) is used to speed up the lookups.
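# Worked examples from the Project Euler 92 statement:
#   44 -> 32 -> 13 -> 10 -> 1 -> 1 -> ...                        (settles on 1)
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 ... (settles on 89)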
UpperCamelCase : list[bool | None] = [None] * 10_000_000
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False
def A__ ( __lowerCAmelCase : int ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) )
lowerCamelCase__ = number_chain
while number < 10_000_000:
lowerCamelCase__ = number_chain
number *= 10
return number_chain
def A__ ( __lowerCAmelCase : int = 10_000_000 ):
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 0 |