def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways some of the grey squares in a row of
    `length` units can be replaced by coloured oblong tiles of length 2 (red),
    3 (green) or 4 (blue), using a single colour at a time."""
    # different_colour_ways_number[row_length][tile_length - 2] holds the number
    # of non-empty tilings of a row of `row_length` units with one tile length
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
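    # Added sanity check: Project Euler 116's worked example says a row of five
    # units can be filled in exactly 12 ways, which this recurrence reproduces.
    assert solution(5) == 12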

# ---------------------------------------------------------------------------

import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")

# ---------------------------------------------------------------------------

def ith_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n),
    using the factorial number system.

    >>> ith_permutation(0, 4)
    [0, 1, 2, 3]
    >>> ith_permutation(5, 4)
    [0, 3, 2, 1]
    """
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()

# ---------------------------------------------------------------------------

from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER) but operates on characters instead of words. Please refer to the docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""
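# Worked example of the formula above (added for illustration): with reference
# "abc" and prediction "abcd" there is one insertion and no substitutions or
# deletions, so CER = (0 + 0 + 1) / 3 ≈ 0.333.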
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total

# ---------------------------------------------------------------------------

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(backend, "tokenizers")

        backend = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend, "tensorflow_text")

        backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(backend, "sentencepiece_and_tokenizers")

        backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(backend, "sentencepiece_and_tensorflow_text")

        backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            "def function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\n"
            "class FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)

# ---------------------------------------------------------------------------

import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)

# ---------------------------------------------------------------------------

import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Evaluate a checkpoint on one rank of a distributed run and save the predictions to save_dir."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )

    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate the partial per-rank results into one list, sorted by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
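    # Example launch (added illustration; the script file name, model and paths
    # are placeholders, not from the original):
    #   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #       --model_name facebook/wmt19-en-de --data_dir $DATA_DIR \
    #       --save_dir gens --type_path test --task translation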
    run_generate()

# ---------------------------------------------------------------------------

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
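# Example (added illustration; assumes the classes above are importable as in
# transformers, and the exact kwargs of generate_dummy_inputs may differ by
# library version):
#   onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)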

# ---------------------------------------------------------------------------

import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])

# ---------------------------------------------------------------------------

"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
for attribute in key.split(""".""" ):
UpperCAmelCase = getattr(_snake_case , _snake_case )
if weight_type is not None:
UpperCAmelCase = getattr(_snake_case , _snake_case ).shape
else:
UpperCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(_snake_case )[0].split(""".""" )[-2]
UpperCAmelCase = mapped_key.replace("""*""" , _snake_case )
if "weight_g" in name:
UpperCAmelCase = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase = """weight_v"""
elif "weight" in name:
UpperCAmelCase = """weight"""
elif "bias" in name:
UpperCAmelCase = """bias"""
else:
UpperCAmelCase = None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase = name.split(""".""" )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=True ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = HubertConfig.from_pretrained(_snake_case )
else:
UpperCAmelCase = HubertConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase = target_dict.pad_index
UpperCAmelCase = target_dict.bos_index
UpperCAmelCase = target_dict.eos_index
UpperCAmelCase = len(target_dict.symbols )
UpperCAmelCase = os.path.join(_snake_case , """vocab.json""" )
if not os.path.isdir(_snake_case ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _snake_case )
UpperCAmelCase = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_snake_case , )
UpperCAmelCase = True if config.feat_extract_norm == """layer""" else False
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
UpperCAmelCase = HubertForCTC(_snake_case )
else:
UpperCAmelCase = HubertModel(_snake_case )
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , _snake_case )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
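# Example invocation (added illustration; the script name and paths are
# placeholders, not from the original):
#   python convert_hubert_checkpoint.py --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base --not_finetuned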

# ---------------------------------------------------------------------------

"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class lowerCAmelCase ( nn.Module ):
def __init__( self ):
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
_UpperCAmelCase = nn.BatchNormad(4 )
_UpperCAmelCase = nn.Linear(4 , 5 )
def __A ( self , a__ ):
return self.lineara(self.batchnorm(self.lineara(a__ ) ) )
class lowerCAmelCase ( unittest.TestCase ):
def __A ( self ):
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(a__ ):
nonlocal batch_sizes
batch_sizes.append(a__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(a__ , [1_28, 64, 32, 16, 8] )
def __A ( self ):
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(a__ , a__ ):
nonlocal batch_sizes
batch_sizes.append(a__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_UpperCAmelCase , _UpperCAmelCase = mock_training_loop_function('hello' )
self.assertListEqual(a__ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def __A ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(a__ ):
pass
with self.assertRaises(a__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def __A ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(a__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def __A ( self ):
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(a__ , a__ , a__ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(a__ ) as cm:
mock_training_loop_function(1_28 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def __A ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a__ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(a__ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def __A ( self ):
_UpperCAmelCase = torch.cuda.memory_allocated()
_UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , a__ )
_UpperCAmelCase = release_memory(a__ )
self.assertEqual(torch.cuda.memory_allocated() , a__ )
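# The tests above pin down the decorator's contract: on every "CUDA out of
# memory" RuntimeError it halves the batch size and retries, so a typical
# (illustrative) training entry point looks like:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_function(batch_size):
#       ...  # build dataloaders with `batch_size` and train
#
#   training_function()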

# ---------------------------------------------------------------------------

"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def __A ( self ):
super().setUp()
def __A ( self , **a__ ):
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a__ )
def __A ( self , **a__ ):
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a__ )
def __A ( self ):
_UpperCAmelCase = '永和服装饰品有限公司,今天天气非常好'
_UpperCAmelCase = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def __A ( self ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase , _UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase , _UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass

# ---------------------------------------------------------------------------

from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
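# Example usage (added illustration; the image URL is a placeholder):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL.Image with the rendered depth map
#   result["predicted_depth"]  # raw torch.Tensor of predicted depths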
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a = logging.get_logger(__name__)
a = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
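# Illustrative usage sketch (hypothetical repo id and input shape, shown only for context):
#
#   vae = OnnxRuntimeModel.from_pretrained("some-org/sd-onnx-vae", provider="CPUExecutionProvider")
#   outputs = vae(latent_sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
#   vae.save_pretrained("./exported_vae")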
| 7 | 0 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(r'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(r'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(r'(_{2,})')
_split_re = r'^\w+(\.\w+)*$'
_INVALID_WINDOWS_CHARACTERS = r'<>:/\|?*'  # name assumed; this constant is unused in the excerpt
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r'\1_\2', name)
    name = _lowercase_uppercase_re.sub(r'\1_\2', name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''')
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''')
    if not re.match(_split_re, split):
        raise ValueError(F'''Split name should match '{_split_re}' but got '{split}'.''')
    return F'''{filename_prefix_for_name(name)}-{split}'''
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix)
    return F'''{filepath}*'''
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
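# Worked example (illustrative, following the code above):
#   filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 50])
#   -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]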
| 395 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
            'We move the model to cuda.' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
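# Illustrative usage sketch (hypothetical model class and weights path; mirrors the public
# `accelerate` quantization API, shown only for context):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   with init_empty_weights():
#       empty_model = MyModel(config)                      # hypothetical model class
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )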
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == 'balanced_low_0'), max_memory=max_memory, **kwargs, )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        'Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit '
                        'the quantized model. If you want to dispatch the model on the CPU or the disk while keeping '
                        'these modules in `torch_dtype`, you need to pass a custom `device_map` to '
                        '`load_and_quantize_model`. Check '
                        'https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk '
                        'for more details.' )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
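# Illustrative note: for a typical causal-LM wrapper whose last child is the output head and
# whose input/output embeddings are tied, the function above usually returns something like
# ["lm_head"], so the head is kept in its original dtype for numerical stability.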
def has_4bit_bnb_layers(model):
    # Check whether any `bnb.nn.Linear4bit` layer is present in the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(module, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(module, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module_ = module
        # split the dotted parameter path and walk down to the owning submodule
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module_, split)
                if new_module is None:
                    raise ValueError(F'''{module_} has no attribute {split}.''')
                module_ = new_module
            tensor_name = splits[-1]
        # offload weights
        module_._parameters[tensor_name].requires_grad = False
        offload_weight(module_._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module_._parameters[tensor_name], 'SCB'):
            offload_weight(
                module_._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)
    set_module_tensor_to_device(module, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
| 395 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # Builds the tree: 1 at the root, 2 and 3 as children, 4 and 5 under 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
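# For the sample tree from make_tree() (1 at the root, children 2 and 3, with 4 and 5 under 2),
# the zigzag traversal above prints [[1], [3, 2], [4, 5]].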
| 551 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, num_inference_steps=50, output_type="pil", return_dict=True, **kwargs):
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
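# Illustrative usage sketch (checkpoint name is an example choice; assumes a trained UNet and a
# compatible scheduler):
#
#   from diffusers import UNet2DModel, DDIMScheduler
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   images = pipe(batch_size=1, num_inference_steps=50).images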
| 551 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
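# Illustrative usage sketch (default settings resize the shortest edge to 224 and center-crop
# to 224x224; `pil_image` is an assumed PIL input):
#
#   image_processor = CLIPImageProcessor()
#   batch = image_processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape   # -> torch.Size([1, 3, 224, 224])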
| 709 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
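# Worked micro-example (illustrative, not part of the metric): with
#   pred_label = [[0, 1], [1, 1]], label = [[0, 1], [0, 1]], num_labels = 2, ignore_index = 255,
# the histograms give area_intersect = [1, 2], area_pred_label = [1, 3], area_label = [2, 2],
# so area_union = [2, 3] and the per-category IoU is [0.5, 2/3].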
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }), reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ], )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 605 | 0 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
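        # Worked example of the formula above: with image_size=30, patch_size=2 and mask_ratio=0.6,
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91.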
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 46 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py', FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}', )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
            else:
                logger.info(f'Creating features from dataset file at {args.data_dir}')
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
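# Illustrative usage sketch (hypothetical local data dir; the GLUE utilities above are deprecated):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")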
| 305 | 0 |
import random
class Onepad:
    @staticmethod
    def encrypt(text):
        """Function to encrypt text using pseudo-random numbers"""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        """Function to decrypt text using pseudo-random numbers."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
| 702 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
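# Sketch of the deferred behaviour the _LazyModule above provides (assumes the
# transformers package is installed; attribute access triggers the real import):
#
#   import transformers.models.xglm as xglm
#   config = xglm.XGLMConfig()    # configuration_xglm is imported lazily here
#   tok_cls = xglm.XGLMTokenizer  # tokenization_xglm is imported on first access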
| 56 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
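# Minimal usage sketch for the processor above (the checkpoint id is real, the
# image path is illustrative; with the default apply_ocr=True the words and
# boxes come from Tesseract OCR):
#
#   from PIL import Image
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values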
| 550 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
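# The JIT test above only checks that tracing preserves output shapes.  The
# same pattern in isolation (a sketch, assuming jax is installed):
#
#   import jax, jax.numpy as jnp
#
#   @jax.jit
#   def double(x):
#       return x * 2
#
#   with jax.disable_jit():
#       eager = double(jnp.ones((2, 3)))
#   compiled = double(jnp.ones((2, 3)))
#   assert eager.shape == compiled.shape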
| 550 | 1 |
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several real tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
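# Usage sketch for the tokenizer above (checkpoint id is real; the placeholder
# token is illustrative):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding
#   ids = tokenizer.encode("a photo of <cat-toy>")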
| 715 |
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
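# Expected output for the graph above: the shortest 1 -> 4 path is
# 1 -> 3 -> 4 (5 + 6 = 11) and the shortest 0 -> 3 path is 0 -> 2 -> 3
# (9 + 7 = 16), so the two calls print 11 and 16.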
| 471 | 0 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)

    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size on disk.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
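# Usage sketch (the path is illustrative): deduplicate initializers that were
# serialized twice in an exported graph, e.g. tied embedding weights.
#
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   print("optimized model written to", optimized_path)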
| 588 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
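# Sketch of how the two phases exercised above are used for sampling (the
# scheduler calls are the real diffusers API; `unet` is a stand-in model):
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:  # Runge-Kutta warm-up steps, then PLMS steps
#       noise_pred = unet(sample, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample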
| 153 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 200 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (e.g. "A", "Z", "AB") to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
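# Worked example for the base-26 arithmetic above: "AZ" is evaluated from the
# right as (ord("Z") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 26 + 26 = 52.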
if __name__ == "__main__":
from doctest import testmod
testmod()
| 200 | 1 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort starting from `start`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)  # ['c', 'd', 'e', 'b', 'a'] for the edges above
| 50 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 487 | 0 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
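# Illustration of how the cutoffs partition the vocabulary.  The numbers below
# are the usual WikiText-103 Transformer-XL settings, quoted only as an example:
#
#   softmax = ProjectedAdaptiveLogSoftmax(
#       n_token=267735, d_embed=1024, d_proj=1024,
#       cutoffs=[20000, 40000, 200000], div_val=4,
#   )
#   # head cluster: tokens [0, 20000) at full width d_embed
#   # tail clusters: [20000, 40000), [40000, 200000), [200000, 267735),
#   # projected down to d_embed // 4**i for cluster i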
| 350 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Tuple = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
snake_case_ : Union[str, Any] = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to transformers PP-only names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
snake_case_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
snake_case_ : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
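# Example invocation of the script above (paths are illustrative):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /data/bloom/megatron_ckpt \
#       --pytorch_dump_folder_path /data/bloom/hf_ckpt \
#       --shard_model \
#       --pretraining_tp 4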
| 350 | 1 |
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """
    Power Iteration: find the largest eigenvalue (and corresponding eigenvector)
    of `input_matrix`, given an initial vector with a component along the
    largest eigenvector.  `input_matrix` must be real symmetric or Hermitian.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 198 |
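As a sanity check of the power-iteration idea above, here is a small unmasked sketch (an illustrative re-implementation, not the masked function from this file) that compares the dominant eigenvalue of a symmetric matrix against numpy's eigh.

import numpy as np

def power_iteration(matrix, vector, error_tol=1e-12, max_iterations=100):
    # Repeatedly apply the matrix and renormalize; the Rayleigh quotient
    # converges to the dominant eigenvalue for symmetric matrices.
    lambda_previous = 0.0
    for _ in range(max_iterations):
        w = matrix @ vector
        vector = w / np.linalg.norm(w)
        lambda_ = vector.T @ matrix @ vector
        if abs(lambda_ - lambda_previous) / abs(lambda_) <= error_tol:
            break
        lambda_previous = lambda_
    return lambda_, vector

A = np.array([[41.0, 4.0, 20.0], [4.0, 26.0, 30.0], [20.0, 30.0, 50.0]])
value, _ = power_iteration(A, np.array([41.0, 4.0, 20.0]))
assert abs(value - np.linalg.eigh(A)[0][-1]) < 1e-6  # eigh sorts eigenvalues ascending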
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( a_ , a_ , a_ , a_="attention"):
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def __UpperCAmelCase ( a_ , a_ , a_ , a_=False):
if split_mlp_wi:
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
snake_case_ = (wi_a, wi_a)
else:
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def __UpperCAmelCase ( a_ , *, a_ , a_):
snake_case_ = traverse_util.flatten_dict(variables['target'])
snake_case_ = {'/'.join(a_): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case_ = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , a_)
snake_case_ = collections.OrderedDict()
# Shared embeddings.
snake_case_ = old['token_embedder/embedding']
# Encoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'encoder' , 'pre_attention_layer_norm')
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(a_ , a_ , 'encoder' , 'attention')
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 1 (MLP).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'encoder' , 'pre_mlp_layer_norm')
snake_case_ , snake_case_ = tax_mlp_lookup(a_ , a_ , 'encoder' , a_)
snake_case_ = layer_norm
if split_mlp_wi:
snake_case_ = wi[0].T
snake_case_ = wi[1].T
else:
snake_case_ = wi.T
snake_case_ = wo.T
snake_case_ = old[
'encoder/relpos_bias/rel_embedding'
].T
snake_case_ = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'decoder' , 'pre_self_attention_layer_norm')
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(a_ , a_ , 'decoder' , 'self_attention')
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 1 (Cross Attention).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'decoder' , 'pre_cross_attention_layer_norm')
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(a_ , a_ , 'decoder' , 'encoder_decoder_attention')
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 2 (MLP).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'decoder' , 'pre_mlp_layer_norm')
snake_case_ , snake_case_ = tax_mlp_lookup(a_ , a_ , 'decoder' , a_)
snake_case_ = layer_norm
if split_mlp_wi:
snake_case_ = wi[0].T
snake_case_ = wi[1].T
else:
snake_case_ = wi.T
snake_case_ = wo.T
snake_case_ = old['decoder/decoder_norm/scale']
snake_case_ = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case_ = old['decoder/logits_dense/kernel'].T
return new
def __UpperCAmelCase ( a_ , a_):
snake_case_ = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case_ = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case_ = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.')
snake_case_ = state_dict['shared.weight']
return state_dict
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
snake_case_ = checkpoints.load_tax_checkpoint(a_)
snake_case_ = convert_tax_to_pytorch(a_ , num_layers=config.num_layers , is_encoder_only=a_)
snake_case_ = make_state_dict(a_ , a_)
model.load_state_dict(a_ , strict=a_)
def __UpperCAmelCase ( a_ , a_ , a_ , a_ = False):
snake_case_ = TaConfig.from_json_file(a_)
print(f'''Building PyTorch model from configuration: {config}''')
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case_ = TaEncoderModel(a_)
else:
snake_case_ = TaForConditionalGeneration(a_)
# Load weights from tf checkpoint
load_tax_weights_in_ta(a_ , a_ , a_ , a_)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
model.save_pretrained(a_)
# Verify that we can load the checkpoint.
model.from_pretrained(a_)
print('Done')
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
lowercase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 198 | 1 |
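The conversion above hinges on flattening the nested T5X parameter tree into '/'-joined keys before remapping them to PyTorch names. A minimal sketch of that flattening step with plain dicts (names and values are illustrative):

def flatten(tree, prefix=""):
    # Turn {"encoder": {"layers_0": {...}}} into {"encoder/layers_0/...": leaf}.
    flat = {}
    for key, value in tree.items():
        path = f"{prefix}/{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        else:
            flat[path] = value
    return flat

params = {"encoder": {"layers_0": {"attention": {"query": {"kernel": [[1.0]]}}}}}
print(flatten(params))
# {'encoder/layers_0/attention/query/kernel': [[1.0]]}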
import string
import numpy
def lowerCamelCase( a__ ,a__):
return b if a == 0 else greatest_common_divisor(b % a ,a__)
class A__ :
UpperCAmelCase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    UpperCAmelCase = numpy.vectorize(lambda x : x % 36 )  # lambda parameter renamed to match its body
UpperCAmelCase = numpy.vectorize(UpperCamelCase__ )
def __init__( self : int , _a : numpy.ndarray ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.modulus(_a ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_SCREAMING_SNAKE_CASE =encrypt_key.shape[0]
def __UpperCamelCase ( self : Dict , _a : str ) -> int:
"""simple docstring"""
return self.key_string.index(_a )
def __UpperCamelCase ( self : List[str] , _a : int ) -> str:
"""simple docstring"""
return self.key_string[round(_a )]
def __UpperCamelCase ( self : int ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_SCREAMING_SNAKE_CASE =det % len(self.key_string )
_SCREAMING_SNAKE_CASE =len(self.key_string )
if greatest_common_divisor(_a , len(self.key_string ) ) != 1:
_SCREAMING_SNAKE_CASE =(
f"determinant modular {req_l} of encryption key({det}) "
f"is not co prime w.r.t {req_l}.\nTry another key."
)
raise ValueError(_a )
def __UpperCamelCase ( self : List[str] , _a : str ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[char for char in text.upper() if char in self.key_string]
_SCREAMING_SNAKE_CASE =chars[-1]
while len(_a ) % self.break_key != 0:
chars.append(_a )
return "".join(_a )
def __UpperCamelCase ( self : List[str] , _a : str ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.process_text(text.upper() )
_SCREAMING_SNAKE_CASE =''''''
for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
_SCREAMING_SNAKE_CASE =text[i : i + self.break_key]
_SCREAMING_SNAKE_CASE =[self.replace_letters(_a ) for char in batch]
_SCREAMING_SNAKE_CASE =numpy.array([vec] ).T
_SCREAMING_SNAKE_CASE =self.modulus(self.encrypt_key.dot(_a ) ).T.tolist()[
0
]
_SCREAMING_SNAKE_CASE =''''''.join(
self.replace_digits(_a ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def __UpperCamelCase ( self : Union[str, Any] ) -> numpy.ndarray:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_SCREAMING_SNAKE_CASE =det % len(self.key_string )
_SCREAMING_SNAKE_CASE =None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_SCREAMING_SNAKE_CASE =i
break
_SCREAMING_SNAKE_CASE =(
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_a ) )
def __UpperCamelCase ( self : str , _a : str ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.make_decrypt_key()
_SCREAMING_SNAKE_CASE =self.process_text(text.upper() )
_SCREAMING_SNAKE_CASE =''''''
for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
_SCREAMING_SNAKE_CASE =text[i : i + self.break_key]
_SCREAMING_SNAKE_CASE =[self.replace_letters(_a ) for char in batch]
_SCREAMING_SNAKE_CASE =numpy.array([vec] ).T
_SCREAMING_SNAKE_CASE =self.modulus(decrypt_key.dot(_a ) ).T.tolist()[0]
_SCREAMING_SNAKE_CASE =''''''.join(
self.replace_digits(_a ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =int(input('''Enter the order of the encryption key: '''))
_SCREAMING_SNAKE_CASE =[]
print('''Enter each row of the encryption key with space separated integers''')
for _ in range(a__):
_SCREAMING_SNAKE_CASE =[int(a__) for x in input().split()]
hill_matrix.append(a__)
_SCREAMING_SNAKE_CASE =HillCipher(numpy.array(a__))
print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
_SCREAMING_SNAKE_CASE =input('''\n1. Encrypt\n2. Decrypt\n''')
if option == "1":
_SCREAMING_SNAKE_CASE =input('''What text would you like to encrypt?: ''')
print('''Your encrypted text is:''')
print(hc.encrypt(a__))
elif option == "2":
_SCREAMING_SNAKE_CASE =input('''What text would you like to decrypt?: ''')
print('''Your decrypted text is:''')
print(hc.decrypt(a__))
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 716 |
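A compact standalone illustration of the Hill-cipher core implemented above: one block is encrypted as a modular matrix product and decrypted with the modular inverse of the key. The 2x2 key below is an example whose determinant (1) is coprime with 36; it is not taken from the source.

import numpy as np

KEY_STRING = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"  # 36 symbols, as in the class above
key = np.array([[2, 5], [1, 3]])                     # det = 1, coprime with 36

def encrypt_block(block):
    vec = np.array([[KEY_STRING.index(c)] for c in block])
    out = (key @ vec) % 36
    return "".join(KEY_STRING[int(n)] for n in out.flatten())

# Modular inverse key = det_inv * adjugate (mod 36); det = 1 here, so det_inv = 1.
inv_key = np.array([[3, -5], [-1, 2]]) % 36

def decrypt_block(block):
    vec = np.array([[KEY_STRING.index(c)] for c in block])
    out = (inv_key @ vec) % 36
    return "".join(KEY_STRING[int(n)] for n in out.flatten())

cipher = encrypt_block("HI")  # -> "S5"
assert decrypt_block(cipher) == "HI"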
from collections.abc import Generator
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =0, 1
while True:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =b, a + b
yield b
def lowerCamelCase( a__ = 1000):
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =fibonacci_generator()
while len(str(next(a__))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 191 | 0 |
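The function above answers Project Euler 25: the index of the first Fibonacci term with n digits. An unmasked sketch of the same logic:

def fib():
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b  # yields F(2), F(3), ... = 1, 2, 3, 5, 8, ...

def first_index_with_digits(n):
    count = 1
    gen = fib()
    while len(str(next(gen))) < n:
        count += 1
    # +1 offsets for F(1), which the generator never yields.
    return count + 1

print(first_index_with_digits(3))  # 12, since F(12) = 144 is the first 3-digit term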
# Function to print upper half of diamond (pyramid)
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Dict:
for i in range(0 , snake_case_ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def lowerCamelCase__ ( snake_case_ : Union[str, Any] ) -> Optional[Any]:
for i in range(snake_case_ , 0 , -1 ):
for _ in range(snake_case_ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def lowerCamelCase__ ( snake_case_ : str ) -> List[Any]:
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(snake_case_ ) # upper half
reverse_floyd(snake_case_ ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
snake_case_ = 1
while K:
snake_case_ = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
snake_case_ = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 592 |
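For reference, pretty_print(3) prints the upper pyramid followed by its mirror image, i.e. (trailing spaces omitted):

  *
 * *
* * *
* * *
 * *
  *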
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 592 | 1 |
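The _import_structure/_LazyModule registration above defers heavy imports until an attribute is first accessed. A minimal sketch of the same idea (illustrative only, not the actual _LazyModule implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._attr_to_module[attr])
        return getattr(submodule, attr)

# 'sqrt' is imported from 'math' only when first touched.
lazy = LazyModule("demo", {"math": ["sqrt"]})
print(lazy.sqrt(9.0))  # 3.0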
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCamelCase__ ( unittest.TestCase ):
__UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Dict =TextaTextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
return generator, ["Something to write", "Something else"]
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Optional[int] =generator("""Something there""" )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_UpperCamelCase :int =generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
] , )
_UpperCamelCase :Any =generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
] , )
with self.assertRaises(lowerCAmelCase__ ):
generator(4 )
@require_torch
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Dict =pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_UpperCamelCase :Any =generator("""Something there""" , do_sample=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": """"""}] )
_UpperCamelCase :int =3
_UpperCamelCase :Union[str, Any] =generator(
"""Something there""" , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , )
_UpperCamelCase :Tuple =[
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase :str =generator("""This is a test""" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_UpperCamelCase :Dict =generator.model.config.eos_token_id
_UpperCamelCase :Any ="""<pad>"""
_UpperCamelCase :Union[str, Any] =generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , )
self.assertEqual(
lowerCAmelCase__ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase :int =pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_UpperCamelCase :List[Any] =generator("""Something there""" , do_sample=lowerCAmelCase__ )
        self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": """"""}] )
| 512 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class lowerCamelCase__ ( __snake_case ):
def __init__( self , **lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
"""simple docstring"""
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self , **lowerCAmelCase__ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :str ={}
if "candidate_labels" in kwargs:
_UpperCamelCase :str =kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
_UpperCamelCase :Dict =kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="This is a sound of {}." ) -> Tuple:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCamelCase :List[Any] =requests.get(lowerCAmelCase__ ).content
else:
with open(lowerCAmelCase__ , """rb""" ) as f:
_UpperCamelCase :Optional[int] =f.read()
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :Dict =ffmpeg_read(lowerCAmelCase__ , self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase__ , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
_UpperCamelCase :List[str] =self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
_UpperCamelCase :Optional[Any] =candidate_labels
_UpperCamelCase :Union[str, Any] =[hypothesis_template.format(lowerCAmelCase__ ) for x in candidate_labels]
_UpperCamelCase :Tuple =self.tokenizer(lowerCAmelCase__ , return_tensors=self.framework , padding=lowerCAmelCase__ )
_UpperCamelCase :Tuple =[text_inputs]
return inputs
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Optional[int] =model_inputs.pop("""candidate_labels""" )
_UpperCamelCase :Any =model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , lowerCAmelCase__ ):
_UpperCamelCase :Union[str, Any] =text_inputs[0]
else:
# Batching case.
_UpperCamelCase :List[Any] =text_inputs[0][0]
_UpperCamelCase :Any =self.model(**lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase :Union[str, Any] ={
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Optional[int] =model_outputs.pop("""candidate_labels""" )
_UpperCamelCase :Tuple =model_outputs["""logits"""][0]
if self.framework == "pt":
_UpperCamelCase :Optional[int] =logits.softmax(dim=0 )
_UpperCamelCase :int =probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
_UpperCamelCase :int =[
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda x : -x[0] )
]
        return result
| 512 | 1 |
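A hedged usage sketch for a pipeline of this shape. The checkpoint name is an assumption (a commonly used CLAP model), not something stated by the code above, and the audio path is a hypothetical local file.

from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
)
result = classifier(
    "dog_bark.wav",  # local path; URLs and raw numpy waveforms are also accepted
    candidate_labels=["a dog barking", "a cat meowing"],
    hypothesis_template="This is a sound of {}.",
)
print(result)  # [{"score": ..., "label": ...}, ...] sorted by descending score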
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = 3 , _lowerCamelCase = 1 , _lowerCamelCase = 1 , _lowerCamelCase = "relu" , **_lowerCamelCase , )-> List[Any]:
super().__init__(**_lowerCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase__ = tf.keras.layers.ConvaD(
filters=_lowerCamelCase , kernel_size=_lowerCamelCase , strides=_lowerCamelCase , padding='''VALID''' , groups=_lowerCamelCase , use_bias=_lowerCamelCase , name='''convolution''' , )
lowercase__ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase__ = ACTaFN[activation] if activation is not None else tf.identity
def snake_case_( self , _lowerCamelCase )-> Dict:
lowercase__ = self.convolution(self.padding(_lowerCamelCase ) )
lowercase__ = self.normalization(_lowerCamelCase )
lowercase__ = self.activation(_lowerCamelCase )
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , **_lowerCamelCase )-> Optional[int]:
super().__init__(**_lowerCamelCase )
lowercase__ = config.num_channels
lowercase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def snake_case_( self , _lowerCamelCase )-> Tuple:
lowercase__ = shape_list(_lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase__ = tf.transpose(_lowerCamelCase , perm=(0, 2, 3, 1) )
lowercase__ = self.embedder(_lowerCamelCase )
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = 2 , **_lowerCamelCase )-> List[str]:
super().__init__(**_lowerCamelCase )
lowercase__ = tf.keras.layers.ConvaD(
filters=_lowerCamelCase , kernel_size=1 , strides=_lowerCamelCase , use_bias=_lowerCamelCase , name='''convolution''' )
lowercase__ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase = False )-> tf.Tensor:
return self.normalization(self.convolution(_lowerCamelCase ) , training=_lowerCamelCase )
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )-> Optional[int]:
super().__init__(**_lowerCamelCase )
lowercase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCamelCase , name='''pooler''' )
lowercase__ = [
tf.keras.layers.ConvaD(filters=_lowerCamelCase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_lowerCamelCase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def snake_case_( self , _lowerCamelCase )-> Tuple:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase__ = self.pooler(_lowerCamelCase )
for layer_module in self.attention:
lowercase__ = layer_module(_lowerCamelCase )
lowercase__ = hidden_state * pooled
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , **_lowerCamelCase )-> Tuple:
super().__init__(**_lowerCamelCase )
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1 , out_channels // config.groups_width )
lowercase__ = (
TFRegNetShortCut(_lowerCamelCase , stride=_lowerCamelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase__ = [
TFRegNetConvLayer(_lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_lowerCamelCase , stride=_lowerCamelCase , groups=_lowerCamelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_lowerCamelCase , kernel_size=1 , activation=_lowerCamelCase , name='''layer.2''' ),
]
lowercase__ = ACTaFN[config.hidden_act]
def snake_case_( self , _lowerCamelCase )-> List[Any]:
lowercase__ = hidden_state
for layer_module in self.layers:
lowercase__ = layer_module(_lowerCamelCase )
lowercase__ = self.shortcut(_lowerCamelCase )
hidden_state += residual
lowercase__ = self.activation(_lowerCamelCase )
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , **_lowerCamelCase )-> List[str]:
super().__init__(**_lowerCamelCase )
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1 , out_channels // config.groups_width )
lowercase__ = (
TFRegNetShortCut(_lowerCamelCase , stride=_lowerCamelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase__ = [
TFRegNetConvLayer(_lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_lowerCamelCase , stride=_lowerCamelCase , groups=_lowerCamelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_lowerCamelCase , kernel_size=1 , activation=_lowerCamelCase , name='''layer.3''' ),
]
lowercase__ = ACTaFN[config.hidden_act]
def snake_case_( self , _lowerCamelCase )-> Dict:
lowercase__ = hidden_state
for layer_module in self.layers:
lowercase__ = layer_module(_lowerCamelCase )
lowercase__ = self.shortcut(_lowerCamelCase )
hidden_state += residual
lowercase__ = self.activation(_lowerCamelCase )
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 2 , _lowerCamelCase = 2 , **_lowerCamelCase )-> Union[str, Any]:
super().__init__(**_lowerCamelCase )
lowercase__ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase__ = [
# downsampling is done in the first layer with stride of 2
layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase , name='''layers.0''' ),
*[layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def snake_case_( self , _lowerCamelCase )-> Any:
for layer_module in self.layers:
lowercase__ = layer_module(_lowerCamelCase )
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , **_lowerCamelCase )-> List[Any]:
super().__init__(**_lowerCamelCase )
lowercase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , depth=_lowerCamelCase , name=f'''stages.{i+1}''' ) )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = True )-> TFBaseModelOutputWithNoAttention:
lowercase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
lowercase__ = stage_module(_lowerCamelCase )
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCamelCase , hidden_states=_lowerCamelCase )
@keras_serializable
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
A_ = RegNetConfig
def __init__( self , _lowerCamelCase , **_lowerCamelCase )-> List[str]:
super().__init__(**_lowerCamelCase )
lowercase__ = config
lowercase__ = TFRegNetEmbeddings(_lowerCamelCase , name='''embedder''' )
lowercase__ = TFRegNetEncoder(_lowerCamelCase , name='''encoder''' )
lowercase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCamelCase , name='''pooler''' )
@unpack_inputs
def snake_case_( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.embedder(_lowerCamelCase , training=_lowerCamelCase )
lowercase__ = self.encoder(
_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase , training=_lowerCamelCase )
lowercase__ = encoder_outputs[0]
lowercase__ = self.pooler(_lowerCamelCase )
# Change to NCHW output format have uniformity in the modules
lowercase__ = tf.transpose(_lowerCamelCase , perm=(0, 3, 1, 2) )
lowercase__ = tf.transpose(_lowerCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase__ = tuple([tf.transpose(_lowerCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __A ( a ):
"""simple docstring"""
A_ = RegNetConfig
A_ = 'regnet'
A_ = 'pixel_values'
@property
def snake_case_( self )-> Tuple:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , a , )
class __A ( a ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )-> Optional[Any]:
super().__init__(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
lowercase__ = TFRegNetMainLayer(_lowerCamelCase , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(
pixel_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase , training=_lowerCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , a , )
class __A ( a , a ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )-> Optional[Any]:
super().__init__(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
lowercase__ = config.num_labels
lowercase__ = TFRegNetMainLayer(_lowerCamelCase , name='''regnet''' )
# classification head
lowercase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(
_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase , training=_lowerCamelCase )
lowercase__ = outputs.pooler_output if return_dict else outputs[1]
lowercase__ = self.classifier[0](_lowerCamelCase )
lowercase__ = self.classifier[1](_lowerCamelCase )
lowercase__ = None if labels is None else self.hf_compute_loss(labels=_lowerCamelCase , logits=_lowerCamelCase )
if not return_dict:
lowercase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states )
| 161 |
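The repeated tf.transpose calls above convert between the PyTorch-style NCHW layout and the NHWC layout that Keras conv layers expect (NCHW is unsupported on CPU). A minimal demonstration of the two permutations:

import tensorflow as tf

x_nchw = tf.random.normal((1, 3, 224, 224))       # (batch, channels, H, W)
x_nhwc = tf.transpose(x_nchw, perm=(0, 2, 3, 1))  # -> (batch, H, W, channels)
print(x_nhwc.shape)                               # (1, 224, 224, 3)

# Transposing back restores the original layout exactly.
x_back = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))
tf.debugging.assert_equal(x_back, x_nchw)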
'''simple docstring'''
def _lowerCAmelCase ( lowercase : float , lowercase : float ) ->float:
"""simple docstring"""
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 | 1 |
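Plugging textbook values for water into the formula above (c = sqrt(bulk_modulus / density)) gives roughly the expected speed of sound; the constants are approximate.

bulk_modulus = 2.2e9  # Pa, approximate for water
density = 1_000.0     # kg/m^3
print((bulk_modulus / density) ** 0.5)  # ~1483 m/s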
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : str = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class __snake_case (_a ):
lowerCAmelCase__ = "pix2struct_text_model"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[Any] , _UpperCAmelCase : List[Any]=5_0244 , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : int=64 , _UpperCAmelCase : Optional[int]=2048 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Optional[int]=128 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=1E-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : List[Any]="gelu_new" , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Any=False , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : str=False , _UpperCAmelCase : List[Any]=True , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : List[str] = d_kv
_lowerCAmelCase : Any = d_ff
_lowerCAmelCase : Optional[Any] = num_layers
_lowerCAmelCase : List[str] = num_heads
_lowerCAmelCase : List[Any] = relative_attention_num_buckets
_lowerCAmelCase : List[str] = relative_attention_max_distance
_lowerCAmelCase : str = dropout_rate
_lowerCAmelCase : List[Any] = layer_norm_epsilon
_lowerCAmelCase : str = initializer_factor
_lowerCAmelCase : int = use_cache
_lowerCAmelCase : str = eos_token_id
_lowerCAmelCase : List[str] = decoder_start_token_id
# for backwards compatibility
_lowerCAmelCase : Union[str, Any] = dense_act_fn
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , is_decoder=_UpperCAmelCase , **_UpperCAmelCase , )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase : int = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_lowerCAmelCase : Tuple = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = "pix2struct_vision_model"
def __init__( self : int , _UpperCAmelCase : Any=768 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Optional[int]=2048 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : int="gelu_new" , _UpperCAmelCase : Dict=1E-6 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Optional[int]=1E-10 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Union[str, Any]=4096 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Any=128 , **_UpperCAmelCase : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : str = patch_embed_hidden_size
_lowerCAmelCase : str = d_ff
_lowerCAmelCase : int = dropout_rate
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Tuple = initializer_factor
_lowerCAmelCase : Optional[Any] = attention_dropout
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Any = dense_act_fn
_lowerCAmelCase : str = seq_len
_lowerCAmelCase : List[str] = relative_attention_num_buckets
_lowerCAmelCase : List[Any] = relative_attention_max_distance
_lowerCAmelCase : List[Any] = d_kv
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Dict ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_lowerCAmelCase : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = "pix2struct"
lowerCAmelCase__ = True
def __init__( self : Tuple , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : str=False , _UpperCAmelCase : str=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : int , ) -> int:
'''simple docstring'''
super().__init__(tie_word_embeddings=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
if text_config is None:
_lowerCAmelCase : Tuple = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
_lowerCAmelCase : int = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
_lowerCAmelCase : Union[str, Any] = PixaStructTextConfig(**_UpperCAmelCase )
_lowerCAmelCase : int = PixaStructVisionConfig(**_UpperCAmelCase )
_lowerCAmelCase : int = self.text_config.decoder_start_token_id
_lowerCAmelCase : str = self.text_config.pad_token_id
_lowerCAmelCase : Tuple = self.text_config.eos_token_id
_lowerCAmelCase : Optional[Any] = initializer_factor
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : int = self.initializer_range
_lowerCAmelCase : int = self.initializer_range
_lowerCAmelCase : Union[str, Any] = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , _UpperCAmelCase : PixaStructTextConfig , _UpperCAmelCase : PixaStructVisionConfig , **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : List[str] = self.text_config.to_dict()
_lowerCAmelCase : int = self.vision_config.to_dict()
_lowerCAmelCase : Dict = self.__class__.model_type
return output
| 196 |
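A short sketch of composing the two sub-configurations above into the composite config via the classmethod defined at the end of the class; default values are used for illustration.

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig()      # default text-decoder settings
vision_config = Pix2StructVisionConfig()  # default vision-encoder settings

config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.hidden_size, config.vision_config.hidden_size)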
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _UpperCAmelCase (UpperCamelCase_ : list[list[float]] ):
'''simple docstring'''
_lowerCAmelCase : int = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(UpperCamelCase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_lowerCAmelCase : Optional[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_lowerCAmelCase : Union[str, Any] = [[0.0, 0.0], [0.0, 0.0]]
_lowerCAmelCase , _lowerCAmelCase : Any = matrix[1][1], matrix[0][0]
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(UpperCamelCase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(UpperCamelCase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_lowerCAmelCase : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_lowerCAmelCase : Optional[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_lowerCAmelCase : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_lowerCAmelCase : str = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_lowerCAmelCase : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_lowerCAmelCase : List[str] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_lowerCAmelCase : Tuple = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_lowerCAmelCase : Any = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_lowerCAmelCase : int = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_lowerCAmelCase : Optional[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_lowerCAmelCase : Tuple = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_lowerCAmelCase : List[str] = array(UpperCamelCase_ )
for i in range(3 ):
for j in range(3 ):
_lowerCAmelCase : List[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_lowerCAmelCase : Tuple = array(UpperCamelCase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(UpperCamelCase_ )
# Calculate the inverse of the matrix
return [[float(d(UpperCamelCase_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 196 | 1 |
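Whichever branch computes the inverse, the result can be validated the same way: the product with the original matrix should be the identity, up to floating-point error.

import numpy as np

def is_inverse(matrix, inverse):
    product = np.array(matrix) @ np.array(inverse)
    return np.allclose(product, np.eye(len(matrix)))

m = [[2.0, 5.0], [1.0, 3.0]]        # det = 1, invertible
m_inv = [[3.0, -5.0], [-1.0, 2.0]]  # adjugate / det
print(is_inverse(m, m_inv))         # True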
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = FileLock(str(tmpdir / """foo.lock""" ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = FileLock(str(tmpdir / """foo.lock""" ) )
SCREAMING_SNAKE_CASE__ : str = 0.01
with locka.acquire():
with pytest.raises(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = time.time()
locka.acquire(__lowerCAmelCase )
assert time.time() - _start > timeout
def _lowercase ( __lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__ : List[str] = """a""" * 1000 + """.lock"""
SCREAMING_SNAKE_CASE__ : Dict = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(__lowerCAmelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__lowerCAmelCase ):
locka.acquire(0 )
| 680 |
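Outside the tests, the lock is used as a context manager; a minimal sketch (the lock-file name is illustrative):

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("work.lock")
try:
    with lock.acquire(timeout=0.05):
        pass  # critical section: only one process holds the lock here
except Timeout:
    print("another process holds the lock")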
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a :str = logging.getLogger(__name__)
def _lowercase ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=__lowerCAmelCase , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=__lowerCAmelCase , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=__lowerCAmelCase , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=__lowerCAmelCase , default="""data/dump""" , help="""The dump file prefix.""" )
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
SCREAMING_SNAKE_CASE__ : List[str] = BertTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE__ : str = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
SCREAMING_SNAKE_CASE__ : str = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
SCREAMING_SNAKE_CASE__ : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
SCREAMING_SNAKE_CASE__ : List[Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
SCREAMING_SNAKE_CASE__ : int = fp.readlines()
logger.info("""Start encoding""" )
logger.info(F'''{len(__lowerCAmelCase )} examples to process.''' )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1_0000
SCREAMING_SNAKE_CASE__ : Dict = time.time()
for text in data:
SCREAMING_SNAKE_CASE__ : Dict = F'''{bos} {text.strip()} {sep}'''
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
rslt.append(__lowerCAmelCase )
iter += 1
if iter % interval == 0:
SCREAMING_SNAKE_CASE__ : str = time.time()
logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
SCREAMING_SNAKE_CASE__ : Tuple = time.time()
logger.info("""Finished binarization""" )
logger.info(F'''{len(__lowerCAmelCase )} examples processed.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.vocab_size
if vocab_size < (1 << 16):
SCREAMING_SNAKE_CASE__ : Tuple = [np.uintaa(__lowerCAmelCase ) for d in rslt]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [np.intaa(__lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(__lowerCAmelCase , """wb""" ) as handle:
pickle.dump(rslt_ , __lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 680 | 1 |
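The dtype choice near the end of the script halves storage whenever the vocabulary fits in 16 bits; a quick illustration of the threshold (values are illustrative):

import numpy as np

token_ids = [101, 2023, 2003, 102]
vocab_size = 30_522  # e.g. a BERT-sized vocabulary

# IDs below 2**16 fit in uint16 (2 bytes each); larger vocabularies need int32.
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
arr = np.array(token_ids, dtype=dtype)
print(arr.dtype, arr.nbytes)  # uint16 8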
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
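

def _beit_onnx_usage_sketch():
    # Minimal usage sketch (illustrative, not part of the original module):
    # instantiate the config with defaults and inspect the ONNX export spec.
    config = BeitConfig()
    onnx_config = BeitOnnxConfig(config)
    return onnx_config.inputs, onnx_config.atol_for_validation  # (OrderedDict(...), 1e-4)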
| 714 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APPROX-VERTEX-COVER: repeatedly pick the vertex of highest degree.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
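

def _max_heap_demo():
    # Illustrative aside (not part of the original module): pushing -len(adj)
    # turns heapq's min-heap into a max-heap keyed on vertex degree, which is
    # exactly what the main loop above relies on.
    q = []
    heapq.heappush(q, [-3, ("v", [0, 1, 2])])
    heapq.heappush(q, [-1, ("w", [0])])
    return heapq.heappop(q)  # [-3, ('v', [0, 1, 2])]: the highest-degree vertex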
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 201 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )

    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 201 | 1 |
"""simple docstring"""
_lowercase : Optional[Any] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
_lowercase : int = frozenset(['prompt', 'negative_prompt'])
_lowercase : Tuple = frozenset([])
_lowercase : Tuple = frozenset(['image'])
_lowercase : Optional[int] = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
_lowercase : Tuple = frozenset(['image'])
_lowercase : int = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
_lowercase : Optional[Any] = frozenset(['prompt', 'image', 'negative_prompt'])
_lowercase : Optional[Any] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
_lowercase : Optional[Any] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
_lowercase : List[Any] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
_lowercase : Union[str, Any] = frozenset(['image', 'mask_image'])
_lowercase : List[str] = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
_lowercase : List[Any] = frozenset(['example_image', 'image', 'mask_image'])
_lowercase : Union[str, Any] = frozenset(['class_labels'])
_lowercase : Optional[int] = frozenset(['class_labels'])
_lowercase : Optional[int] = frozenset(['batch_size'])
_lowercase : Optional[Any] = frozenset([])
_lowercase : Any = frozenset(['batch_size'])
_lowercase : int = frozenset([])
_lowercase : Optional[int] = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
_lowercase : str = frozenset(['prompt', 'negative_prompt'])
_lowercase : Union[str, Any] = frozenset(['input_tokens'])
_lowercase : int = frozenset(['input_tokens'])
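
# Illustrative helper (not part of the original module): pipeline tests typically
# compare a pipeline's accepted call arguments against one of the frozen
# parameter sets defined above.
def _params_are_covered(accepted: frozenset, required: frozenset) -> bool:
    # True when every required parameter is accepted by the pipeline's __call__
    return required.issubset(accepted)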
| 397 |
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return a peak of `lst` (which must strictly rise and then strictly fall)
    by divide and conquer in O(log n).
    """
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
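

def _peak_examples():
    # Illustrative checks (not part of the original module): the input must
    # strictly rise and then strictly fall for the recursion above to be valid.
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
    assert peak([1, 10, 9, 8, 7, 6, 5, 4]) == 10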
if __name__ == "__main__":
import doctest
doctest.testmod()
| 397 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """Finds the nearest neighbour in `dataset` for every vector in `value_array`."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
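

def _similarity_demo():
    # Illustrative run (not part of the original module): the nearest neighbour
    # of [0, 1] in {[0, 0], [1, 1], [2, 2]} is [0, 0] at euclidean distance 1.
    dataset = np.array([[0, 0], [1, 1], [2, 2]])
    value_array = np.array([[0, 1]])
    return similarity_search(dataset, value_array)  # [[[0, 0], 1.0]]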
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 564 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: return the least cuboid size M such that the number of
    cuboids with an integer shortest surface path exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
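

def _integer_path_demo():
    # Illustrative check of the counting condition above: for the classic
    # 6 x 5 x 3 cuboid, the longest side is 6 and the two shortest sides sum
    # to 8, so the shortest surface path is sqrt(8**2 + 6**2) = 10, an integer.
    return sqrt(8**2 + 6**2).is_integer()  # True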
if __name__ == "__main__":
print(F"""{solution() = }""") | 564 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
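

def _downscale_demo():
    # Illustrative check (values chosen for the demo): sizes are rounded up to a
    # multiple of scale_factor**2 and then mapped onto the grid the movq decoder
    # expects.
    return downscale_height_and_width(768, 768), downscale_height_and_width(512, 512)  # ((96, 96), (64, 64))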
class KandinskyVaaImgaImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation using Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(_a)  # `_a` above holds the module-level example docstring
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 663 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowerCamelCase =logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 285 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
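

def _maclaurin_demo():
    # Illustrative sanity checks (not part of the original module): the truncated
    # series should approach the exact values sin(pi/2) = 1 and cos(pi) = -1.
    assert abs(maclaurin_sin(pi / 2) - 1.0) < 1e-10
    assert abs(maclaurin_cos(pi) + 1.0) < 1e-10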
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 583 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
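

# Standalone illustration of the label-masking step used inside the test above:
# -100 is the default ignore_index of torch.nn.CrossEntropyLoss, so padded
# positions contribute nothing to the sequence-to-sequence loss.
def _mask_pad_tokens(label_ids, pad_token_id):
    return [[-100 if token == pad_token_id else token for token in seq] for seq in label_ids]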
| 230 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 230 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
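

def _mock_demo():
    # Illustrative use of the stub above (not part of the original tests):
    # iter_content yields the fixed CONTENT bytes, which the cached-download
    # assertions later compare against.
    return b"".join(MockResponse().iter_content())  # b'"text": ["foo", "foo"]'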
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def A ( _lowercase , _lowercase , _lowercase ):
import requests
monkeypatch.setattr(UpperCAmelCase__ , '''request''' , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = URL
if issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = url
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = [url]
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = {'''train''': url}
SCREAMING_SNAKE_CASE : Optional[int] = '''dummy'''
SCREAMING_SNAKE_CASE : int = '''downloads'''
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path
SCREAMING_SNAKE_CASE : Optional[int] = DownloadConfig(
cache_dir=os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , use_etag=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : str = DownloadManager(dataset_name=UpperCAmelCase__ , download_config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = dl_manager.download(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = [downloaded_paths]
SCREAMING_SNAKE_CASE : int = [urls]
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
assert "train" in downloaded_paths.keys()
SCREAMING_SNAKE_CASE : Any = downloaded_paths.values()
SCREAMING_SNAKE_CASE : int = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
SCREAMING_SNAKE_CASE : Tuple = Path(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
SCREAMING_SNAKE_CASE : List[str] = downloaded_path.read_text()
assert content == CONTENT
SCREAMING_SNAKE_CASE : Optional[Any] = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
SCREAMING_SNAKE_CASE : List[str] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = str(UpperCAmelCase__ )
if issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = filename
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = [filename]
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''train''': filename}
SCREAMING_SNAKE_CASE : List[str] = '''dummy'''
SCREAMING_SNAKE_CASE : Dict = xz_file.parent
SCREAMING_SNAKE_CASE : int = '''extracted'''
SCREAMING_SNAKE_CASE : Tuple = DownloadConfig(
cache_dir=UpperCAmelCase__ , use_etag=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = DownloadManager(dataset_name=UpperCAmelCase__ , download_config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = dl_manager.extract(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Dict = [extracted_paths]
SCREAMING_SNAKE_CASE : int = [paths]
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
assert "train" in extracted_paths.keys()
SCREAMING_SNAKE_CASE : Tuple = extracted_paths.values()
SCREAMING_SNAKE_CASE : Dict = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(UpperCAmelCase__ , etag=UpperCAmelCase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
SCREAMING_SNAKE_CASE : List[str] = extracted_path.read_text()
SCREAMING_SNAKE_CASE : Tuple = text_file.read_text()
assert extracted_file_content == expected_file_content
def A ( _lowercase , _lowercase ):
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(UpperCAmelCase__ , start=1 ):
SCREAMING_SNAKE_CASE : List[str] = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = request.getfixturevalue(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(UpperCAmelCase__ ) , start=1 ):
_test_jsonl(UpperCAmelCase__ , UpperCAmelCase__ )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[str] = request.getfixturevalue(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(UpperCAmelCase__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(UpperCAmelCase__ ) , start=1 ):
_test_jsonl(UpperCAmelCase__ , UpperCAmelCase__ )
assert num_tar == 1
assert num_jsonl == 2
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(UpperCAmelCase__ ) , start=1 ):
assert os.path.basename(UpperCAmelCase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 248 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blipa_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
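

def _qkv_bias_demo():
    # Illustrative shape check for read_in_q_v_bias above: BLIP-2-style ViTs keep
    # the key bias fixed at zero, so the fused qkv bias is the concatenation
    # [q_bias, zeros, v_bias].
    q_bias = torch.ones(4)
    v_bias = torch.full((4,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
    return qkv_bias.shape  # torch.Size([12])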
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
lowercase_ = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
lowercase_ = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase_ = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
lowercase_ , lowercase_ = get_blipa_config(UpperCAmelCase__ )
lowercase_ = InstructBlipForConditionalGeneration(UpperCAmelCase__ ).eval()
lowercase_ = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowercase_ , lowercase_ = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowercase_ = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowercase_ = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowercase_ , lowercase_ , lowercase_ = load_model_and_preprocess(
name=UpperCAmelCase__ , model_type=UpperCAmelCase__ , is_eval=UpperCAmelCase__ , device=UpperCAmelCase__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowercase_ = original_model.state_dict()
lowercase_ = create_rename_keys(UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase_ = state_dict.pop(UpperCAmelCase__ )
if key.startswith("""Qformer.bert""" ):
lowercase_ = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowercase_ = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
lowercase_ = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
lowercase_ = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
lowercase_ = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
lowercase_ = key.replace("""t5""" , """language""" )
lowercase_ = val
# read in qv biases
read_in_q_v_bias(UpperCAmelCase__ , UpperCAmelCase__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
lowercase_ = load_demo_image()
lowercase_ = """What is unusual about this image?"""
# create processor
lowercase_ = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ )
lowercase_ = InstructBlipProcessor(
image_processor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ , )
lowercase_ = processor(images=UpperCAmelCase__ , text=UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# make sure processor creates exact same pixel values
lowercase_ = vis_processors["""eval"""](UpperCAmelCase__ ).unsqueeze(0 ).to(UpperCAmelCase__ )
lowercase_ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
hf_model.to(UpperCAmelCase__ )
with torch.no_grad():
if "vicuna" in model_name:
lowercase_ = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
lowercase_ = hf_model(**UpperCAmelCase__ ).logits
else:
lowercase_ = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
lowercase_ = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(UpperCAmelCase__ )
lowercase_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
lowercase_ = hf_model(**UpperCAmelCase__ , labels=UpperCAmelCase__ ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase_ = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , UpperCAmelCase__ , atol=UpperCAmelCase__ )
print("""Looks ok!""" )
print("""Generating with original model...""" )
lowercase_ = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
lowercase_ = hf_model.generate(
**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase_ = 2
print("""Original generation:""" , UpperCAmelCase__ )
lowercase_ = processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase_ = [text.strip() for text in output_text]
print("""HF generation:""" , UpperCAmelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCAmelCase__ )
hf_model.save_pretrained(UpperCAmelCase__ )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        'instructblip-vicuna-7b',
        'instructblip-vicuna-13b',
        'instructblip-flan-t5-xl',
        'instructblip-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='instructblip-flan-t5-xl',
        choices=choices,
        type=str,
        help='Name of the model checkpoint to convert.',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
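    # Example invocation (illustrative; the script name and output path are assumptions):
    #   python convert_instructblip_original_to_pytorch.py \
    #       --model_name instructblip-flan-t5-xl \
    #       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
    #       --push_to_hub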
| 412 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
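# With the lazy module installed above, downstream code can import symbols as if they
# were eagerly defined (e.g. `from transformers.models.blip import BlipProcessor`,
# assuming this file is the package __init__); the heavy torch/TF submodules are only
# imported on first attribute access.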
| 205 |
class Graph:
    """Weighted undirected graph with an adjacency-map representation."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected edge (head, tail) with the given weight."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct (a precondition of Boruvka's algorithm)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns a minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
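

if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original snippet): build a small
    # weighted graph, make the weights distinct, and print the resulting MST.
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 1), (2, 3, 1)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))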
| 205 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__ as upstream does
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
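

# Hypothetical usage sketch (assumes a loaded `datasets.Dataset` whose features
# include an Image column "image" and a ClassLabel column "labels"):
#
#     task = ImageClassification(image_column="image", label_column="labels")
#     dataset = dataset.prepare_for_task(task)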
| 127 |
def is_palindrome(head):
    """Checks a singly linked list by reversing its second half in place."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """Checks a singly linked list by pushing its second half onto a stack."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    """Checks a singly linked list via a value -> positions dictionary."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
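

# Minimal singly linked list node assumed by the functions above (the original
# snippet relies on nodes exposing `.val` and `.next`); illustrative only.
class Node:
    def __init__(self, val, next_node=None):
        self.val = val
        self.next = next_node


if __name__ == "__main__":
    # e.g. 1 -> 2 -> 1 reads the same in both directions
    head = Node(1, Node(2, Node(1)))
    print(is_palindrome_stack(head))  # True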
| 127 | 1 |
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """Arguments: num_of_nodes - the number of nodes in the graph."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component to which a given node belongs."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Refreshes the component index of every node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the components of two given nodes (union by size)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm and prints the MST it finds."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest placeholder (the original example was elided in this snippet)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
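

def _example() -> None:
    # Hypothetical usage sketch (not part of the original module): a small
    # 4-node graph; boruvka() prints each added edge and the total MST weight.
    g = Graph(4)
    g.add_edge(0, 1, 10)
    g.add_edge(0, 2, 6)
    g.add_edge(0, 3, 5)
    g.add_edge(1, 3, 15)
    g.add_edge(2, 3, 4)
    g.boruvka()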
| 508 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 508 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 145 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Returns all primes up to and including `num` (sieve of Eratosthenes)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
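    # Quick sanity check (illustrative): prime_sieve(10) == [2, 3, 5, 7];
    # the sieve runs in O(n log log n) time.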
| 145 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
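

# Minimal sanity sketch (illustrative, relies only on the defaults above): the
# attribute_map aliases the generic config names onto the decoder-specific ones.
#
#     config = TrOCRConfig()
#     assert config.hidden_size == config.d_model == 1024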
| 456 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively searches `list_data` for `key` from both ends at once."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
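    # Hypothetical quick check (not in the original snippet): the scan walks in
    # from both ends of the list at once.
    print(search([5, 1, 4, 2, 3], key=4))  # 2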
| 456 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 632 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
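    # Example invocation (illustrative; the checkpoint path is an assumption):
    #   python convert_original_tf_checkpoint_to_pytorch.py \
    #       --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
    #       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224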
| 632 | 1 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of Roman numerals to its integer value."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generates the minimal Roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Returns the number of characters saved by rewriting each numeral minimally."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 714 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
lowercase : int = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 114 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 44 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
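

# Hypothetical usage sketch (the checkpoint name is an assumption; any CLAP-style
# zero-shot audio model should work):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])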
| 195 | 0 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for (prito, yyy) in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(
                                        neighbours, key(neighbours, var, goal, g_function)
                                    )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
_snake_case : Union[str, Any] = make_common_ground()
_snake_case : Any = blocks_blk
# hyper parameters
_snake_case : Union[str, Any] = 1
_snake_case : List[Any] = 1
_snake_case : List[Any] = 20
_snake_case : List[Any] = 3 # one consistent and two other inconsistent
# start and end destination
_snake_case : Union[str, Any] = (0, 0)
_snake_case : Tuple = (n - 1, n - 1)
_snake_case : Optional[int] = 1
def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
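# Sketch of the priority function assumed above. key() is defined earlier in
# the file; in the reference multi-heuristic A* it is the usual weighted rule,
# path cost so far plus a weighted heuristic estimate:
#
#     def key(s, i, goal, g_function):
#         return g_function[s] + Wa * heuristics[i](s, goal)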
| 716 |
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
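# For the symmetric block matrix M = [[A, B], [B.T, C]], the identity
# det(M) == det(A) * det(C - B.T @ inv(A) @ B) holds, which is exactly what
# the first test below checks numerically.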
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A has 3 rows while B has 4, so the row check must fail; this copy of
        # the test had lost the dimension mismatch, so B was given an extra row
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 203 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 57 |
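# Usage sketch for the pipelines exported above (checkpoint names are
# illustrative, not part of this module):
#   from diffusers import ControlNetModel
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )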
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
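# Example invocation (script name and paths are placeholders):
#   python convert_transfo_xl_checkpoint.py \
#       --pytorch_dump_folder_path ./transfo-xl-out \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./config.json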
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
    )
| 57 | 1 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
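# Hand-checked example: solution(5) == 2*3*1 + 2*4*1 + 2*5*2 == 34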
| 391 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
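# Note on the post-processing exercised above: post_process_semantic_segmentation
# resizes the (batch, num_labels, h, w) logits to each requested target size and
# takes an argmax over the label dimension, so every returned map is a 2-D
# tensor of class indices.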
| 391 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
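# Every case above embeds `import os` unconditionally; imports that only appear
# inside try/except blocks are treated as optional by get_imports, so each case
# should parse to exactly ["os"], which is what the parametrized test checks.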
@pytest.mark.parametrize("case" ,__A )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowercase__ : List[Any] = os.path.join(__A ,"test_file.py" )
with open(__A ,"w" ) as _tmp_file:
_tmp_file.write(__A )
lowercase__ : Dict = get_imports(__A )
assert parsed_imports == ["os"] | 397 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
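# Usage sketch (hypothetical test module): the fixture attaches the environment
# to the requesting test class, so tests can read hyperparameters and paths.
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNodeTraining:
#       framework = "pytorch"
#
#       def test_defaults(self):
#           assert self.env.hyperparameters["task_name"] == "mnli"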
| 36 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
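# Usage sketch (downloads the checkpoint above on first use; call signature
# follows the `inputs` list declared on the tool):
#   tool = ImageSegmentationTool()
#   mask = tool(image, "cat")  # black/white PIL mask of the matching pixels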
| 718 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
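# Example invocation (script name, bucket names and paths are placeholders):
#   python run_mlm_tpu.py --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --tokenizer unigram-tokenizer-wikitext --output_dir ./mlm-out --bfloat16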
| 88 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
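# Usage sketch: the generated ModelProto class can parse a raw sentencepiece
# model file (filename is a placeholder):
#   m = ModelProto()
#   m.ParseFromString(open("tokenizer.model", "rb").read())
#   print(len(m.pieces))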
| 591 |
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
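# Usage sketch (hypothetical): wrap a text model's config so the multimodal
# model can also read the modal encoder size from it.
#   config = MMBTConfig(AutoConfig.from_pretrained("bert-base-uncased"), num_labels=2)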
| 591 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
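# Usage sketch (checkpoint name is illustrative):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")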
| 594 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1

            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
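# e.g. gnome_sort([3, 1, 2]) walks: swap 3,1 -> [1, 3, 2], step back, advance,
# swap 3,2 -> [1, 2, 3]; worst case O(n^2) comparisons, O(1) extra space.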
| 594 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
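# e.g. a member named "../evil.txt" resolves outside the extraction root, so
# badpath() is True and the member is skipped with an error log instead of
# being written above the output directory.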
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1f\x8b"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : Union[Path, str] ):
"""simple docstring"""
with lzma.open(_lowercase ) as compressed_file:
with open(_lowercase , "wb" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : Union[Path, str] ):
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase__ = rarfile.RarFile(_lowercase )
rf.extractall(_lowercase )
rf.close()
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= [B'\x28\xb5\x2F\xFD']
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : Union[Path, str] ):
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
UpperCAmelCase__ = zstd.ZstdDecompressor()
with open(_lowercase , "rb" ) as ifh, open(_lowercase , "wb" ) as ofh:
dctx.copy_stream(_lowercase , _lowercase )
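# Self-contained variant of the streaming decompression above (assumes the
# `zstandard` package is installed; paths are illustrative):
#
#     import zstandard as zstd
#     dctx = zstd.ZstdDecompressor()
#     with open("data.jsonl.zst", "rb") as ifh, open("data.jsonl", "wb") as ofh:
#         dctx.copy_stream(ifh, ofh)  # decompresses chunk by chunk, O(1) memory
#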
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= [B'\x42\x5A\x68']
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : Union[Path, str] ):
"""simple docstring"""
with bza.open(_lowercase , "rb" ) as compressed_file:
with open(_lowercase , "wb" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : Union[Path, str] ):
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
        os.makedirs(_lowercase , exist_ok=_lowercase )
        with py7zr.SevenZipFile(_lowercase , "r" ) as archive:
archive.extractall(_lowercase )
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= [B'\x04\x22\x4D\x18']
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : Union[Path, str] ):
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(_lowercase , "rb" ) as compressed_file:
with open(_lowercase , "wb" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class lowercase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
A__= {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _UpperCAmelCase ( cls : Optional[Any] ):
"""simple docstring"""
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _UpperCAmelCase ( _lowercase : Union[Path, str] , _lowercase : int ):
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(_lowercase , magic_number_length=_lowercase )
except OSError:
return b""
@classmethod
def _UpperCAmelCase ( cls : Dict , _lowercase : Union[Path, str] , _lowercase : bool = False ):
"""simple docstring"""
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=_lowercase , )
UpperCAmelCase__ = cls.infer_extractor_format(_lowercase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _UpperCAmelCase ( cls : Dict , _lowercase : Union[Path, str] ): # <Added version="2.4.0"/>
"""simple docstring"""
UpperCAmelCase__ = cls._get_magic_number_max_length()
UpperCAmelCase__ = cls._read_magic_number(_lowercase , _lowercase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_lowercase , magic_number=_lowercase ):
return extractor_format
@classmethod
def _UpperCAmelCase ( cls : Dict , _lowercase : Union[Path, str] , _lowercase : Union[Path, str] , _lowercase : Optional[str] = None , _lowercase : Optional[BaseExtractor] = "deprecated" , ):
"""simple docstring"""
os.makedirs(os.path.dirname(_lowercase ) , exist_ok=_lowercase )
# Prevent parallel extractions
UpperCAmelCase__ = str(Path(_lowercase ).with_suffix(".lock" ) )
with FileLock(_lowercase ):
shutil.rmtree(_lowercase , ignore_errors=_lowercase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_lowercase , _lowercase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=_lowercase , )
UpperCAmelCase__ = extractor if extractor != "deprecated" else extractor_format
else:
UpperCAmelCase__ = cls.extractors[extractor_format]
return extractor.extract(_lowercase , _lowercase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=_lowercase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_lowercase ):
return extractor.extract(_lowercase , _lowercase )
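# Typical use of the facade above: sniff the format from the magic number,
# then dispatch to the registered extractor. A hedged sketch (the public
# method names come from the original `datasets` library; this row scrambles
# the `def` names, and the paths are illustrative):
#
#     fmt = Extractor.infer_extractor_format("dump.tar.gz")   # e.g. "gzip"
#     Extractor.extract("dump.tar.gz", "out/dump", extractor_format=fmt)
#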
| 475 |
from __future__ import annotations
def shear_stress( stress : float , tangential_force : float , area : float , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
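# Worked example: with exactly one unknown (passed as 0), the solver returns
# the missing quantity. For tangential_force = 100 N over area = 20 m^2 the
# shear stress is tau = F / A = 5.0 Pa:
#
#     >>> shear_stress(stress=0, tangential_force=100, area=20)
#     ('stress', 5.0)
#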
| 475 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = "detr"
__snake_case : Tuple = ["past_key_values"]
__snake_case : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: Optional[Any] , UpperCAmelCase_: int=True , UpperCAmelCase_: Tuple=None , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: List[str]=100 , UpperCAmelCase_: Union[str, Any]=6 , UpperCAmelCase_: List[str]=2_048 , UpperCAmelCase_: Optional[int]=8 , UpperCAmelCase_: Union[str, Any]=6 , UpperCAmelCase_: List[Any]=2_048 , UpperCAmelCase_: List[Any]=8 , UpperCAmelCase_: Optional[Any]=0.0 , UpperCAmelCase_: str=0.0 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: str="relu" , UpperCAmelCase_: Union[str, Any]=256 , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: Any=0.0 , UpperCAmelCase_: Union[str, Any]=0.0 , UpperCAmelCase_: List[str]=0.02 , UpperCAmelCase_: Optional[int]=1.0 , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Any="sine" , UpperCAmelCase_: Union[str, Any]="resnet50" , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: List[Any]=False , UpperCAmelCase_: int=1 , UpperCAmelCase_: str=5 , UpperCAmelCase_: str=2 , UpperCAmelCase_: List[Any]=1 , UpperCAmelCase_: Tuple=1 , UpperCAmelCase_: Optional[Any]=5 , UpperCAmelCase_: int=2 , UpperCAmelCase_: Dict=0.1 , **UpperCAmelCase_: int , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
_SCREAMING_SNAKE_CASE = config_class.from_dict(_UpperCAmelCase )
# set timm attributes to None
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None, None, None
_SCREAMING_SNAKE_CASE = use_timm_backbone
_SCREAMING_SNAKE_CASE = backbone_config
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = num_queries
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = encoder_ffn_dim
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = encoder_attention_heads
_SCREAMING_SNAKE_CASE = decoder_ffn_dim
_SCREAMING_SNAKE_CASE = decoder_layers
_SCREAMING_SNAKE_CASE = decoder_attention_heads
_SCREAMING_SNAKE_CASE = dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation_dropout
_SCREAMING_SNAKE_CASE = activation_function
_SCREAMING_SNAKE_CASE = init_std
_SCREAMING_SNAKE_CASE = init_xavier_std
_SCREAMING_SNAKE_CASE = encoder_layerdrop
_SCREAMING_SNAKE_CASE = decoder_layerdrop
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = auxiliary_loss
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = backbone
_SCREAMING_SNAKE_CASE = use_pretrained_backbone
_SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
_SCREAMING_SNAKE_CASE = class_cost
_SCREAMING_SNAKE_CASE = bbox_cost
_SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
_SCREAMING_SNAKE_CASE = mask_loss_coefficient
_SCREAMING_SNAKE_CASE = dice_loss_coefficient
_SCREAMING_SNAKE_CASE = bbox_loss_coefficient
_SCREAMING_SNAKE_CASE = giou_loss_coefficient
_SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return self.d_model
@classmethod
def UpperCamelCase ( cls: Optional[int] , UpperCAmelCase_: PretrainedConfig , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return cls(backbone_config=_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[Any] = version.parse("1.11" )
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return 1E-5
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return 12
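# Usage sketch (this configuration class ships in `transformers` as
# `DetrConfig`; the scrambled class name above is this row's, not the
# library's):
#
#     from transformers import DetrConfig
#     config = DetrConfig(num_queries=50, encoder_layers=4)
#     config.num_attention_heads  # resolved to encoder_attention_heads via attribute_map
#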
| 714 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
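# The availability guards above keep each backend optional: the package
# imports cleanly even when torch or flax is missing. A minimal sketch of the
# same guard pattern (the helper name is illustrative):
import importlib.util

def backend_available(name):
    # True when the module can be located without actually importing it.
    return importlib.util.find_spec(name) is not None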
| 569 | 0 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(2_0_0) = }''')
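# Sanity check: the numbers whose only prime factors are 2, 3 and 5 start
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12:
#
#     >>> ugly_numbers(10)
#     12
#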
| 372 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class MobileViTFeatureExtractor( MobileViTImageProcessor ):
    def __init__(self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
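# The shim above is the standard deprecation pattern: subclass the
# replacement, warn once on construction, delegate everything else. The same
# pattern in miniature (names are illustrative):
#
#     class OldThing(NewThing):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("OldThing is deprecated; use NewThing instead.", FutureWarning)
#             super().__init__(*args, **kwargs)
#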
| 535 | 0 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("""KEY""")
VAL = TypeVar("""VAL""")
@dataclass(frozen=True , slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__( self : Tuple ) -> None:
        super().__init__(None , None )
    def __bool__( self : List[Any] ) -> bool:
        return False
_deleted = _DeletedItem()
class UpperCAmelCase_ ( MutableMapping[KEY, VAL]):
def __init__( self : Tuple , __UpperCamelCase : int = 8 , __UpperCamelCase : float = 0.7_5 ) -> None:
_UpperCamelCase = initial_block_size
_UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase = capacity_factor
_UpperCamelCase = 0
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : KEY ) -> int:
return hash(_UpperCAmelCase ) % len(self._buckets )
def _UpperCamelCase ( self : Dict , __UpperCamelCase : int ) -> int:
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : KEY , __UpperCamelCase : VAL ) -> bool:
_UpperCamelCase = self._buckets[ind]
if not stored:
_UpperCamelCase = _Item(_UpperCAmelCase , _UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase = _Item(_UpperCAmelCase , _UpperCAmelCase )
return True
else:
return False
def _UpperCamelCase ( self : Union[str, Any] ) -> bool:
_UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_UpperCAmelCase )
def _UpperCamelCase ( self : Union[str, Any] ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : int ) -> None:
_UpperCamelCase = self._buckets
_UpperCamelCase = [None] * new_size
_UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : Optional[int] ) -> None:
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : str ) -> None:
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : KEY ) -> Iterator[int]:
_UpperCamelCase = self._get_bucket_index(_UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase = self._get_next_ind(_UpperCAmelCase )
def _UpperCamelCase ( self : int , __UpperCamelCase : KEY , __UpperCamelCase : VAL ) -> None:
for ind in self._iterate_buckets(_UpperCAmelCase ):
if self._try_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
break
def __setitem__( self : List[str] , __UpperCamelCase : KEY , __UpperCamelCase : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(_UpperCAmelCase , _UpperCAmelCase )
def __delitem__( self : Any , __UpperCamelCase : KEY ) -> None:
for ind in self._iterate_buckets(_UpperCAmelCase ):
_UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(_UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : int , __UpperCamelCase : KEY ) -> VAL:
for ind in self._iterate_buckets(_UpperCAmelCase ):
_UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_UpperCAmelCase )
def __len__( self : Union[str, Any] ) -> int:
return self._len
def __iter__( self : Optional[Any] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : Tuple ) -> str:
_UpperCamelCase = ''' ,'''.join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
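# Intended usage of the open-addressing map above, assuming the public class
# is exported as `HashMap` (the name in the original source; this row keeps a
# scrambled class name). The table probes linearly on collisions, grows past
# a 0.75 load factor, and marks deletions with the `_deleted` tombstone so
# later probes keep walking:
#
#     hm = HashMap()
#     hm["a"] = 1
#     hm["b"] = 2
#     del hm["a"]                      # leaves a tombstone, "b" stays reachable
#     assert len(hm) == 1 and hm["b"] == 2
#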
| 714 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_ ( _lowercase):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str]="replace" , __UpperCamelCase : Any="<s>" , __UpperCamelCase : List[str]="</s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Any="<s>" , __UpperCamelCase : Tuple="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : List[Any]=False , **__UpperCamelCase : Optional[int] , ) -> Optional[Any]:
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase = json.load(__UpperCamelCase )
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = errors # how to handle errors in decoding
_UpperCamelCase = bytes_to_unicode()
_UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
_UpperCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCamelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCamelCase = {}
_UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _UpperCamelCase ( self : Dict ) -> List[Any]:
return len(self.encoder )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase ( self : int , __UpperCamelCase : int ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
_UpperCamelCase = tuple(__UpperCamelCase )
_UpperCamelCase = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
_UpperCamelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCamelCase = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCamelCase = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCamelCase = tuple(__UpperCamelCase )
_UpperCamelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCamelCase = get_pairs(__UpperCamelCase )
_UpperCamelCase = ''' '''.join(__UpperCamelCase )
_UpperCamelCase = word
return word
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : List[str] ) -> Optional[int]:
_UpperCamelCase = []
for token in re.findall(self.pat , __UpperCamelCase ):
_UpperCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(''' ''' ) )
return bpe_tokens
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[Any] ) -> Optional[Any]:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
return self.decoder.get(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] ) -> Any:
_UpperCamelCase = ''''''.join(__UpperCamelCase )
_UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' )
_UpperCamelCase = 0
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_UpperCamelCase = token_index
writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , __UpperCamelCase : Any , __UpperCamelCase : Tuple=False , **__UpperCamelCase : Optional[int] ) -> Any:
_UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
_UpperCamelCase = ''' ''' + text
return (text, kwargs)
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , ) -> dict:
_UpperCamelCase = super()._pad(
encoded_inputs=__UpperCamelCase , max_length=__UpperCamelCase , padding_strategy=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
_UpperCamelCase = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCamelCase = len(encoded_inputs['''global_attention_mask'''] ) != len(__UpperCamelCase )
if needs_to_be_padded:
_UpperCamelCase = len(__UpperCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCamelCase = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCamelCase = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
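# The `_pad` override above is the LED-specific part: besides normal padding,
# it extends `global_attention_mask` with -1 ("local attention") so the mask
# stays aligned with the padded input ids. A hedged usage sketch (the
# tokenizer is published in `transformers` as `LEDTokenizer`):
#
#     from transformers import LEDTokenizer
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     batch = tok(["short", "a much longer example sentence"], padding=True)
#     # a per-example "global_attention_mask" passed through tok.pad(...) is
#     # padded with -1 on the same side as the input ids
#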
| 342 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase_ = logging.getLogger()
def get_results( UpperCAmelCase ) -> dict:
    """simple docstring"""
    results = {}
    path = os.path.join(UpperCAmelCase , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can\'t find {path}''' )
return results
lowercase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class A__ ( __SCREAMING_SNAKE_CASE ):
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
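        # Note: the doubled script path above is deliberate -- sys.argv[0] acts
        # as the program name for xla_spawn, while the second occurrence plus
        # the flags form the training command forked across the TPU cores.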
        with patch.object(sys , '''argv''' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def lowercase ( self ) -> Any:
"""simple docstring"""
import xla_spawn
        testargs = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
xla_spawn.main()
| 154 |
def solution() -> int:
    """simple docstring"""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
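# Cross-check: stepping week by week through 1901-2000 and counting the
# first-of-month Sundays gives 171, the well-known answer to Project Euler
# problem 19:
#
#     >>> solution()
#     171
#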
| 154 | 1 |
from collections import namedtuple
_snake_case : Optional[int] = namedtuple('from_to', 'from_ to')
_snake_case : str = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.0_01, 1000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_04_54, 2_64.1_72),
'cubicyard': from_to(0.7_64_55, 1.3_07_95),
'cubicfoot': from_to(0.0_28, 35.31_47),
'cup': from_to(0.0_00_23_65_88, 42_26.75),
}
def volume_conversion( value : float , from_type : str , to_type : str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(lowerCAmelCase_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(lowerCAmelCase_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
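# Worked example: each table entry stores a factor into cubic metres (from_)
# and one out of cubic metres (to), so a conversion is value * from_ * to.
# Converting 4 cubic metres to litres: 4 * 1 * 1000 = 4000.
#
#     >>> volume_conversion(4, "cubicmeter", "litre")
#     4000
#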
| 421 |
def hamming( n_element : int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(F"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
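# Quick check: the first ten Hamming numbers (of the form 2^i * 3^j * 5^k):
#
#     >>> hamming(10)
#     [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
#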
| 421 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str=1_3 , SCREAMING_SNAKE_CASE_ : Tuple=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : List[str]=5 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Dict=6_4 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : str=1_6 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , ) -> Dict:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
lowercase_ = q_groups
lowercase_ = k_groups
lowercase_ = v_groups
lowercase_ = post_attention_groups
lowercase_ = intermediate_groups
lowercase_ = output_groups
def _lowercase ( self : Any ) -> Optional[int]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : List[Any] ) -> Optional[int]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
lowercase_ = SqueezeBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Dict:
lowercase_ = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
lowercase_ = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
lowercase_ = self.num_labels
lowercase_ = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
lowercase_ = self.num_labels
lowercase_ = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]:
lowercase_ = self.num_choices
lowercase_ = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
lowercase_ = self.prepare_config_and_inputs()
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Any = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
a :Tuple = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a :Union[str, Any] = False
a :Union[str, Any] = True
a :Optional[Any] = False
def _lowercase ( self : Optional[int] ) -> Tuple:
lowercase_ = SqueezeBertModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=3_7 )
def _lowercase ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[Any] ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Any:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Union[str, Any] ) -> Dict:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Dict ) -> Dict:
lowercase_ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
lowercase_ = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 97 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowercase__ =logging.get_logger(__name__)
# General docstring
lowercase__ ='ResNetConfig'
# Base docstring
lowercase__ ='microsoft/resnet-50'
lowercase__ =[1, 20_48, 7, 7]
# Image classification docstring
lowercase__ ='microsoft/resnet-50'
lowercase__ ='tiger cat'
lowercase__ =[
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" ):
super().__init__()
a_ = nn.Convad(
UpperCAmelCase , UpperCAmelCase , kernel_size=UpperCAmelCase , stride=UpperCAmelCase , padding=kernel_size // 2 , bias=UpperCAmelCase )
a_ = nn.BatchNormad(UpperCAmelCase )
a_ = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.convolution(UpperCAmelCase )
a_ = self.normalization(UpperCAmelCase )
a_ = self.activation(UpperCAmelCase )
return hidden_state
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
a_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
a_ = config.num_channels
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
a_ = self.embedder(UpperCAmelCase )
a_ = self.pooler(UpperCAmelCase )
return embedding
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 ):
super().__init__()
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , kernel_size=1 , stride=UpperCAmelCase , bias=UpperCAmelCase )
a_ = nn.BatchNormad(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.convolution(UpperCAmelCase )
a_ = self.normalization(UpperCAmelCase )
return hidden_state
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = "relu" ):
super().__init__()
a_ = in_channels != out_channels or stride != 1
a_ = (
ResNetShortCut(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
a_ = nn.Sequential(
ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , activation=UpperCAmelCase ) , )
a_ = ACTaFN[activation]
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = hidden_state
a_ = self.layer(UpperCAmelCase )
a_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
a_ = self.activation(UpperCAmelCase )
return hidden_state
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , UpperCAmelCase = 4 ):
super().__init__()
a_ = in_channels != out_channels or stride != 1
a_ = out_channels // reduction
a_ = (
ResNetShortCut(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
a_ = nn.Sequential(
ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , kernel_size=1 ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase ) , )
a_ = ACTaFN[activation]
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = hidden_state
a_ = self.layer(UpperCAmelCase )
a_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
a_ = self.activation(UpperCAmelCase )
return hidden_state
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , ):
super().__init__()
a_ = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
a_ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , activation=config.hidden_act ) , *[layer(UpperCAmelCase , UpperCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = input
for layer in self.layers:
a_ = layer(UpperCAmelCase )
return hidden_state
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
a_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(UpperCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase ) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ):
a_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a_ = hidden_states + (hidden_state,)
a_ = stage_module(UpperCAmelCase )
if output_hidden_states:
a_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase , )
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : List[Any] = ResNetConfig
lowerCamelCase__ : Dict = 'resnet'
lowerCamelCase__ : Dict = 'pixel_values'
lowerCamelCase__ : str = True
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if isinstance(UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
a_ = value
lowercase__ =r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase__ =r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , UpperCamelCase__ , )
class a_ ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
a_ = config
a_ = ResNetEmbeddings(UpperCAmelCase )
a_ = ResNetEncoder(UpperCAmelCase )
a_ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
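

# --- Added illustration (not in the original file) --------------------------
# A minimal usage sketch of the backbone class above via the public
# `transformers` API. The checkpoint name and stage choices are assumptions
# made for the demo, not something this module prescribes.
def _backbone_usage_demo():
    import torch
    from transformers import ResNetBackbone

    backbone = ResNetBackbone.from_pretrained(
        "microsoft/resnet-50", out_features=["stage2", "stage3", "stage4"]
    )
    pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch with one RGB image
    with torch.no_grad():
        outputs = backbone(pixel_values)
    for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
        # deeper stages have more channels and lower spatial resolution
        print(name, tuple(feature_map.shape))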
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
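

# --- Added illustration (not in the original script) -------------------------
# What `accelerator.accumulate` does for us, written out by hand on a toy
# problem: scale each micro-batch loss by 1/N and step the optimizer only
# every N micro-batches. Shapes and hyper-parameters here are arbitrary.
def _manual_accumulation_demo():
    import torch
    from torch import nn

    torch.manual_seed(0)
    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loss_fn = nn.MSELoss()
    batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]

    accumulation_steps = 4
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(batches):
        loss = loss_fn(model(inputs), targets) / accumulation_steps  # scale the loss
        loss.backward()  # gradients add up across the scaled backward passes
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()  # one update per accumulation window
            optimizer.zero_grad()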
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
            * 5 , )
"""simple docstring"""
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
'''simple docstring'''
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(
        self,
    ) -> str:  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
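

# --- Added illustration (not in the original file) ---------------------------
# A quick, hedged sanity check of the rebalancing: after n random insertions an
# AVL tree's height should stay within roughly 1.44 * log2(n + 2). Note that
# `insert` prints its rotations, so expect verbose output.
if __name__ == "__main__":
    sanity_tree = AVLtree()
    sanity_keys = list(range(100))
    random.shuffle(sanity_keys)
    for key in sanity_keys:
        sanity_tree.insert(key)
    assert sanity_tree.get_height() <= int(1.44 * math.log2(100 + 2)) + 1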
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    :param ksize: kernel size of the convolutional filter (ksize x ksize)
    :param sigma: standard deviation of the gaussian bell curve
    :param theta: orientation of the normal to the parallel stripes of the Gabor function
    :param lambd: wavelength of the sinusoidal component
    :param gamma: spatial aspect ratio (ellipticity of the Gabor support)
    :param psi:   phase offset of the sinusoidal function
    """
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
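

# --- Added illustration (not in the original file) ---------------------------
# Standalone check of the kernel itself, no OpenCV required. The parameter
# values here are arbitrary demo choices.
if __name__ == "__main__":
    demo_kernel = gabor_filter_kernel(ksize=9, sigma=4, theta=0, lambd=10, gamma=1, psi=0)
    print(demo_kernel.shape)  # (9, 9); an even ksize would be bumped to the next odd
    print(float(demo_kernel[4, 4]))  # kernel center: exp(0) * cos(psi) == 1.0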
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # classic Fisher-Yates: walk from the back, swapping each position with a
    # uniformly chosen index at or before it, so every permutation is equally likely
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=16 , A=36 , A=6 , A=6 , A=6 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> List[str]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_hidden_groups
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Tuple:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ (self , A , A , A , A , A , A , A ) -> int:
"""simple docstring"""
_a = AlbertModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A )
_a = model(A , token_type_ids=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A ) -> str:
"""simple docstring"""
_a = AlbertForPreTraining(config=A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , token_type_ids=A , labels=A , sentence_order_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ (self , A , A , A , A , A , A , A ) -> Optional[int]:
"""simple docstring"""
_a = AlbertForMaskedLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A ) -> Dict:
"""simple docstring"""
_a = AlbertForQuestionAnswering(config=A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ (self , A , A , A , A , A , A , A ) -> Optional[int]:
"""simple docstring"""
_a = self.num_labels
_a = AlbertForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ (self , A , A , A , A , A , A , A ) -> str:
"""simple docstring"""
_a = self.num_labels
_a = AlbertForTokenClassification(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = self.num_choices
_a = AlbertForMultipleChoice(config=A )
model.to(A )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ (self ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : str = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : str = True
def a__ (self , A , A , A=False ) -> Optional[Any]:
"""simple docstring"""
_a = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
_a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = AlbertModelTester(self )
_a = ConfigTester(self , config_class=A , hidden_size=37 )
def a__ (self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> int:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def a__ (self ) -> Dict:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
@slow
def a__ (self ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = AlbertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AlbertModel.from_pretrained('''albert-base-v2''' )
_a = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(A , attention_mask=A )[0]
_a = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
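

# --- Added illustration (not in the original test file) -----------------------
# The integration test above, rewritten as a plain inference sketch that goes
# through the tokenizer instead of hand-written input ids (assumes
# `sentencepiece` is installed and the "albert-base-v2" checkpoint can be
# downloaded).
def _albert_inference_demo():
    import torch
    from transformers import AlbertModel, AlbertTokenizer

    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    model = AlbertModel.from_pretrained("albert-base-v2")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])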
"""simple docstring"""
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Stretch the key by cycling its characters until it matches the message length."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter back by the corresponding key letter (mod 26)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt: shift each letter forward by the corresponding key letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
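

# --- Added illustration (not in the original file) ---------------------------
# How the key is stretched to the message length before encrypting:
if __name__ == "__main__":
    print(generate_key("THE GERMAN ATTACK", "SECRET"))  # SECRETSECRETSECRE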
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a = get_logger(__name__)
def lowercase (snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : str=0 ) -> Union[str, Any]:
'''simple docstring'''
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowerCAmelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowerCAmelCase = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(snake_case__ , snake_case__ )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowerCAmelCase = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(snake_case__ , snake_case__ )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowerCAmelCase = os.path.join(snake_case__ , f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(f'''Saving model to {ckpt_dir}''' )
lowerCAmelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(f'''Model saved to {ckpt_dir}''' )
def lowercase (snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=0 ) -> Optional[Any]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
lowerCAmelCase = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
logger.info(f'''Loading model from {input_model_file}''' )
lowerCAmelCase = torch.load(snake_case__ )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowerCAmelCase = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
logger.info(f'''Loading model from {input_model_file}''' )
lowerCAmelCase = torch.load(snake_case__ )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowerCAmelCase = (
os.path.join(snake_case__ , f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
lowerCAmelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , )
lowerCAmelCase = state_dict["""model"""]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(snake_case__ )
def lowercase (snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[Any]=0 ) -> str:
'''simple docstring'''
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowerCAmelCase = FSDP.optim_state_dict(snake_case__ , snake_case__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowerCAmelCase = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(snake_case__ , snake_case__ )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
lowerCAmelCase = os.path.join(snake_case__ , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def lowercase (snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : int=0 ) -> str:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowerCAmelCase = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowerCAmelCase = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
lowerCAmelCase = torch.load(snake_case__ )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
lowerCAmelCase = (
os.path.join(snake_case__ , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
lowerCAmelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , )
lowerCAmelCase = optim_state["""optimizer"""]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
lowerCAmelCase = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ )
optimizer.load_state_dict(snake_case__ )
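

# --- Added illustration (not in the original module) --------------------------
# These helpers are normally reached through `Accelerator.save_state` /
# `Accelerator.load_state` when an FSDP plugin is active, not called directly.
# A hedged, minimal sketch of that flow; it only makes sense under a
# distributed launch (e.g. `accelerate launch --use_fsdp ...`).
def _fsdp_checkpoint_demo():
    import torch.nn as nn
    import torch.optim as optim

    from accelerate import Accelerator, FullyShardedDataParallelPlugin

    fsdp_plugin = FullyShardedDataParallelPlugin()  # defaults to FULL_STATE_DICT
    accelerator = Accelerator(fsdp_plugin=fsdp_plugin)

    model = nn.Linear(8, 8)
    optimizer = optim.AdamW(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)

    accelerator.save_state("ckpt")  # routes through save_fsdp_model / save_fsdp_optimizer
    accelerator.load_state("ckpt")  # routes through load_fsdp_model / load_fsdp_optimizer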
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a = 1_6
a = 3_2
def lowercase (snake_case__ : Accelerator , snake_case__ : int = 16 ) -> Dict:
'''simple docstring'''
lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase = 8
else:
lowerCAmelCase = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a = mocked_dataloaders # noqa: F811
def lowercase (snake_case__ : int , snake_case__ : Tuple ) -> int:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
lowerCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase = config["""lr"""]
lowerCAmelCase = int(config["""num_epochs"""] )
lowerCAmelCase = int(config["""seed"""] )
lowerCAmelCase = int(config["""batch_size"""] )
set_seed(snake_case__ )
lowerCAmelCase , lowerCAmelCase = get_dataloaders(snake_case__ , snake_case__ )
lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCAmelCase = os.path.split(snake_case__ )[-1].split(""".""" )[0]
accelerator.init_trackers(snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCAmelCase = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(snake_case__ ),
"""epoch""": epoch,
} , step=snake_case__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowercase () -> str:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=snake_case__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
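

# --- Added illustration (not in the original script) --------------------------
# The tracking hooks used above, in miniature. Assumes `tensorboard` is
# installed; the logged values are made up.
def _tracking_demo():
    from accelerate import Accelerator

    accelerator = Accelerator(log_with="tensorboard", project_dir="logs")
    accelerator.init_trackers("demo_run", config={"lr": 2e-5, "batch_size": 16})
    for step in range(3):
        accelerator.log({"train_loss": 1.0 / (step + 1)}, step=step)
    accelerator.end_training()  # flushes and closes every open tracker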
"""
Project Euler Problem 101: https://projecteuler.net/problem=101

Find the sum of the first incorrect terms (FITs) of the optimum polynomials
fitted to the generating function u(n) = 1 - n + n^2 - ... + n^10.
"""
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system Ax = b via Gaussian elimination with partial pivoting.
    `matrix` is the coefficient matrix A, `vector` is the column vector b.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Fit the lowest-degree polynomial through the points (1, y_list[0]),
    (2, y_list[1]), ... and return it as a callable.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from the problem."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms of the optimum polynomials of degree
    0 .. order - 1 fitted to the first terms of the sequence.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
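# Illustrative sanity check for the helpers above (not in the original module):
# fitting the first three cubes (1, 8, 27) with a quadratic reproduces them, but
# predicts 58 rather than 4**3 == 64 for the next term -- exactly the "first
# incorrect term" idea the problem is built on.
#
#   cubic = interpolate([1, 8, 27])
#   assert cubic(3) == 27 and cubic(4) == 58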
| 374 |
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings from pixel values.
    """

    def __init__(self, patch_size, stride, padding, num_channels, hidden_size, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor of shape [B, C, H, W].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
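# Illustrative usage (the `sail/poolformer_s12` checkpoint exists on the Hub;
# `image` is assumed to be a PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_label = model(**inputs).logits.argmax(-1).item()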
| 376 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
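# Illustrative sketch of how the two classes fit together (the sizes below are
# made up, and the tokenizer checkpoint is one of the real CodeGen checkpoints):
#
#   from transformers import AutoTokenizer
#   config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
#   onnx_config = CodeGenOnnxConfig(config, use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=1, seq_length=8, framework=TensorType.PYTORCH
#   )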
| 708 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
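# With this pattern, importing the package is cheap: the torch/TF modules are
# only imported on first attribute access. A simplified sketch of what
# `_LazyModule.__getattr__` does under the hood (for illustration only):
#
#   def __getattr__(self, name):
#       module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#       return getattr(module, name)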
| 375 | 0 |
"""Conversion script from original Stable Diffusion checkpoints to the diffusers format."""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
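# Typical invocation (illustrative; the checkpoint file name is an example):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema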
| 267 |
"""File locking utilities, vendored from py-filelock 3.0.12."""
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
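# Minimal usage sketch (illustrative): acquisition is re-entrant via the lock
# counter, and the context manager guarantees release even on error.
#
#   lock = FileLock("high_ground.txt.lock")
#   with lock:
#       with lock:  # nested acquire only bumps the counter
#           with open("high_ground.txt", "a") as f:
#               f.write("hello\n")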
| 267 | 1 |
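# The block below is a manim scene (the class and file names used in the render
# command are assumptions): to render it you would run something like
#
#   manim -pql stage_3.py Stage3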
"""Manim animation: loading the weights of a single checkpoint shard into memory."""
from manim import *


class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2), Write(blue_text))

        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 709 |
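# Illustrative usage of the image processor defined below ("Intel/dpt-large" is
# a real Hub checkpoint; `image` is assumed to be a PIL image):
#
#   from transformers import DPTImageProcessor
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   inputs = processor(images=image, return_tensors="pt")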
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """
        Converts the raw model outputs into per-pixel semantic segmentation maps.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 97 | 0 |
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """
    Check if `number` is a perfect square.
    """
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """
    Given the numerators and denominators of three fractions, return the
    numerator and denominator of their sum, reduced to lowest terms.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
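# Quick illustrative check of `add_three` (not in the original file):
# 1/2 + 1/3 + 1/6 reduces to 1/1.
#
#   assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)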
| 298 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : List[Any] ,*SCREAMING_SNAKE_CASE_ : Union[str, Any] ,**SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : int ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : int ,*SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[str] ,*SCREAMING_SNAKE_CASE_ : List[str] ,**SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : Tuple ,*SCREAMING_SNAKE_CASE_ : Optional[int] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : Tuple ,*SCREAMING_SNAKE_CASE_ : Dict ,**SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : Tuple ,*SCREAMING_SNAKE_CASE_ : Tuple ,**SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : Optional[Any] ,*SCREAMING_SNAKE_CASE_ : Any ,**SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : Optional[Any] ,**SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
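# Behavior sketch (illustrative): instantiating any dummy without the backends
# installed raises immediately, e.g.
#
#   FlaxStableDiffusionPipeline()  # ImportError: ... requires the flax library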
| 535 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
__SCREAMING_SNAKE_CASE : List[Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__SCREAMING_SNAKE_CASE : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowerCAmelCase_( lowercase_ : str ) -> Dict:
with open(lowercase_ , '''rb''' ) as f:
_lowerCamelCase = Image.open(lowercase_ )
return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    dataset_name: Optional[str] = field(
        default=None, metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        }, )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )
def collate_fn(examples):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples])
    labels = torch.tensor([example['''labels'''] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['''train'''] = os.path.join(data_args.train_dir, '''**''')
        if data_args.validation_dir is not None:
            data_files['''validation'''] = os.path.join(data_args.validation_dir, '''**''')
        dataset = load_dataset(
            '''imagefolder''', data_files=data_files, cache_dir=model_args.cache_dir, task='''image-classification''', )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split)
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''')
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_lowerCamelCase = image_processor.size['''shortest_edge''']
else:
_lowerCamelCase = (image_processor.size['''height'''], image_processor.size['''width'''])
_lowerCamelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_lowerCamelCase = Compose(
[
RandomResizedCrop(lowercase_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_lowerCamelCase = Compose(
[
Resize(lowercase_ ),
CenterCrop(lowercase_ ),
ToTensor(),
normalize,
] )
def train_transforms(lowercase_ : Optional[Any] ):
_lowerCamelCase = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(lowercase_ : Tuple ):
_lowerCamelCase = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('''--do_train requires a train dataset''')
        if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('''--do_eval requires a validation dataset''')
        if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('''train''', train_result.metrics)
        trainer.save_metrics('''train''', train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''', metrics)
        trainer.save_metrics('''eval''', metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 623 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa: F403 — pull in all dummy fallbacks
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 1 |
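As a quick check of the batching step in the training script above, here is a self-contained sketch of the collate function with toy tensors; the image shape (3, 224, 224) is an illustrative assumption, not read from the script.

import torch

def collate_fn(examples):
    # Stack per-example image tensors into one batch and gather the labels.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}

examples = [{"pixel_values": torch.rand(3, 224, 224), "labels": i % 2} for i in range(4)]
batch = collate_fn(examples)
print(batch["pixel_values"].shape)  # torch.Size([4, 3, 224, 224])
print(batch["labels"])              # tensor([0, 1, 0, 1])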
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
# fmt: off
A__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 104 |
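The sequence-builder assertions above reduce to a simple list layout. A toy illustration with made-up token ids (2 for [CLS] and 3 for [SEP] are assumptions for this sketch, not values read from the fixture):

cls_id, sep_id = 2, 3
text = [10, 11, 12]
text_2 = [20, 21]
encoded_sentence = [cls_id] + text + [sep_id]
encoded_pair = [cls_id] + text + [sep_id] + text_2 + [sep_id]
print(encoded_sentence)  # [2, 10, 11, 12, 3]
print(encoded_pair)      # [2, 10, 11, 12, 3, 20, 21, 3]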
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + """Fast""" )}
    logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("""/""" )
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F'''=> File names {file_names}''' )
            for file_name in file_names:
                if not file_name.endswith("""tokenizer.json""" ):
                    os.remove(file_name)
                    logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 566 | 0 |
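The conversion script above resolves fast tokenizer classes by name. The core pattern is a plain getattr lookup on the transformers module; a minimal sketch (BertTokenizerFast is used only as a well-known example class):

import transformers

name = "BertTokenizer"
fast_class = getattr(transformers, name + "Fast")  # same trick as TOKENIZER_CLASSES above
print(fast_class.__name__)  # BertTokenizerFast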
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script):
    '''simple docstring'''
    data = script.contents[0]
    info = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"""https://www.instagram.com/{username}/"""
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, """html.parser""").find_all("""script""")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ) -> str:
return f"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ) -> str:
return f"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = "github" ):
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
lowerCAmelCase : int = InstagramUser(SCREAMING_SNAKE_CASE__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,SCREAMING_SNAKE_CASE__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : int =InstagramUser('github')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 693 |
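The profile extraction above boils down to slicing a JSON blob out of a script tag. A self-contained sketch on synthetic HTML (the markup is invented for illustration):

import json
from bs4 import BeautifulSoup

html = '<html><script>{"config": {"user": "demo"}}</script></html>'
script = BeautifulSoup(html, "html.parser").find("script")
data = script.contents[0]
info = json.loads(data[data.find('{"config"'):])  # slice from the JSON start, then parse
print(info["config"]["user"])  # demo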
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 693 | 1 |
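The derived hidden_size in the config above doubles the embedding dimension once per stage after the first. A quick check, assuming transformers is installed (values match the defaults shown):

from transformers import SwinConfig

config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(config.hidden_size)  # 768 == 96 * 2 ** (len(depths) - 1)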
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components(self):
        """simple docstring"""
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16(self):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 4 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size = 1, generator = None, eta = 0.0, num_inference_steps = 50, output_type = "pil", return_dict = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 542 | 0 |
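The pipeline's sampling loop follows a fixed skeleton: scale the input, predict noise, step the scheduler. A dependency-light toy version of that control flow (the update rule and shapes are stand-ins, not a real scheduler or UNet):

import torch

latents = torch.randn(1, 3, 8, 8)
for t in reversed(range(10)):
    latent_model_input = latents                 # stand-in for scheduler.scale_model_input(latents, t)
    noise_prediction = 0.5 * latent_model_input  # stand-in for unet(latent_model_input, t).sample
    latents = latents - 0.1 * noise_prediction   # stand-in for scheduler.step(...).prev_sample
image = (latents / 2 + 0.5).clamp(0, 1)
print(image.shape)  # torch.Size([1, 3, 8, 8])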
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())

# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())

@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 713 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _SCREAMING_SNAKE_CASE :
pass
| 447 | 0 |
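The fixtures above rely on pytest's monkeypatch to swap module attributes for the duration of a test. A minimal self-contained example of that mechanism (patching math.pi is just an illustration):

import math

def test_monkeypatch_swaps_attribute(monkeypatch):
    monkeypatch.setattr(math, "pi", 3.0)  # reverted automatically after the test
    assert math.pi == 3.0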
def depth_first_search(grid, row, col, visit):
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 415 |
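Assuming the depth_first_search function defined above, a small grid shows the path counting (0 = free cell, 1 = blocked cell; the grid is invented for illustration):

grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
# Counts the simple (non-revisiting) paths from the top-left to the bottom-right corner.
print(depth_first_search(grid, 0, 0, set()))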
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our RoBERTa config:""" , config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["""mnli"""].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["""mnli"""].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["""mnli"""].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["""mnli"""](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape , their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3)
    print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
    if not success:
        raise Exception("""Something went wRoNg""" )
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True , exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 306 | 0 |
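The conversion script validates the port by comparing outputs numerically. The same check in isolation (toy tensors; the tolerance mirrors the one in the script):

import torch

our_output = torch.randn(2, 3)
their_output = our_output + 1e-5
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(max_absolute_diff)                                  # ~1e-5
print(torch.allclose(our_output, their_output, atol=1e-3))  # True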
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    """simple docstring"""
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None, ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")
        return cls(controlnets)
| 159 |
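The forward pass above merges per-controlnet residuals by elementwise summation. The merge step on its own, with toy tensors (two "nets" with three feature maps each; all shapes invented):

import torch

per_net_outputs = [[torch.ones(2, 2) for _ in range(3)] for _ in range(2)]
merged = per_net_outputs[0]
for extra in per_net_outputs[1:]:
    merged = [prev + curr for prev, curr in zip(merged, extra)]
print(merged[0])  # each map is the sum across controlnets -> all twos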
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map = None, reduce_labels = False, ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map = None, reduce_labels = False, ):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num = None, label_map = None, reduce_labels = False, ):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU( datasets.Metric ):
    """simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
                } ) , reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ] , )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num = None, label_map = None, reduce_labels = False, ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 159 | 1 |
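The per-category areas above come straight from np.histogram. The full IoU computation for one toy prediction/label pair (arrays invented for illustration):

import numpy as np

pred = np.array([[1, 1], [0, 1]])
label = np.array([[1, 0], [0, 1]])
num_labels = 2
intersect = pred[pred == label]
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
iou = area_intersect / (area_pred + area_label - area_intersect)
print(iou)  # [0.5, 0.66666667]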
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit = 1_000_000, n_limit = 10):
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
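The solution above counts hollow square laminae by the number of tiles they use. A brute-force version for small outer widths makes the bookkeeping visible (the range is chosen arbitrarily for the sketch):

from collections import defaultdict

count = defaultdict(int)
for outer_width in range(3, 13):
    # the hole width must share the outer width's parity and leave at least a 1-tile border
    for hole_width in range(outer_width % 2 or 2, outer_width - 1, 2):
        count[outer_width * outer_width - hole_width * hole_width] += 1
print(dict(count))  # tile count -> number of lamina shapes using exactly that many tiles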
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule ):
    """simple docstring"""
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power = 1.0, name = None, ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio = 0.0, adam_beta1 = 0.9, adam_beta2 = 0.999, adam_epsilon = 1e-8, adam_clipnorm = None, adam_global_clipnorm = None, weight_decay_rate = 0.0, power = 1.0, include_in_weight_decay = None, ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam ):
    """Adam with decoupled weight decay applied before each parameter update."""

    def __init__( self , learning_rate=0.001 , beta_1=0.9 , beta_2=0.999 , epsilon=1e-7 , amsgrad=False , weight_decay_rate=0.0 , include_in_weight_decay=None , exclude_from_weight_decay=None , name="AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config( cls , config ):
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )

    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate , name='adam_weight_decay_rate' )

    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
        return tf.no_op()

    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )

    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients['lr_t'], {"apply_state": apply_state}

    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )

    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )

    def get_config( self ):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate} )
        return config

    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across micro-batches until explicitly reset."""

    def __init__( self ):
        self._gradients = []
        self._accum_steps = None

    @property
    def step( self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()

    @property
    def gradients( self ):
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(gradients )}' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )

    def reset( self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
| 13 | 0 |
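# Hedged usage sketch for the pieces above (hyperparameter values are illustrative
# assumptions, not from the original file): wire the warmup schedule, weight-decayed
# Adam and the gradient accumulator together.
optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
)
accumulator = GradientAccumulator()
# In a training loop one would call `accumulator(grads)` per micro-batch, apply
# `optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))`
# every N micro-batches, then call `accumulator.reset()`.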
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig ):
    model_type = 'data2vec-text'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

class Data2VecTextOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 711 | '''simple docstring'''
import math
def perfect_square( num: int ) -> bool:
    # Exact only while math.sqrt is exact; large integers may hit float rounding.
    return math.sqrt(num ) * math.sqrt(num ) == num

def perfect_square_binary_search( n: int ) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174 | 0 |
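# The float-based check above can fail for very large integers; an exact alternative
# (Python >= 3.8) uses math.isqrt. A minimal sketch:
from math import isqrt

def perfect_square_exact(n: int) -> bool:
    return n >= 0 and isqrt(n) ** 2 == n

assert perfect_square_exact(16) and not perfect_square_exact(15)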
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
    """Dummy beam dataset."""

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )

class NestedBeamDataset(datasets.BeamBasedBuilder ):
    """Dummy beam dataset with nested features."""

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )

def get_test_dummy_examples( ):
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]

def get_test_nested_examples( ):
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest(TestCase ):
    @require_beam
    def test_download_and_prepare( self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded( self ):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    @require_beam
    def test_no_beam_options( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def test_nested_features( self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 491 |
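# Hedged sketch of what the tests above exercise (paths and runner string are
# assumptions): a Beam-based builder must be given a runner, otherwise
# download_and_prepare raises MissingBeamOptions, which the third test checks.
# builder = DummyBeamDataset(cache_dir="/tmp/cache", beam_runner="DirectRunner")
# builder.download_and_prepare()
# dset = builder.as_dataset()
# print(dset["train"].num_rows)  # 3 for the dummy examples above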
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en( ):
    print('Welcome!' )
    yield
    print('Bye!' )

@contextlib.contextmanager
def context_fr( ):
    print('Bonjour!' )
    yield
    print('Au revoir!' )
class TestImportMechanisms(unittest.TestCase ):
    def test_module_spec_available( self ):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers' ) is not None

class GenericUtilTests(unittest.TestCase ):
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ):
        with ContextManagers([] ):
            print('Transformers are awesome!' )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )

    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ):
        with ContextManagers([context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )

    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ):
        with ContextManagers([context_fr(), context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )

    @require_torch
    def test_find_labels_pt( self ):
        self.assertEqual(find_labels(BertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(BertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ['start_positions', 'end_positions'] )

        # find_labels detects the framework through inheritance, whatever the class name
        class DummyModel(BertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ['labels'] )

    @require_tf
    def test_find_labels_tf( self ):
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ['start_positions', 'end_positions'] )

        class DummyModel(TFBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ['labels'] )

    @require_flax
    def test_find_labels_flax( self ):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

        class DummyModel(FlaxBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , [] )
| 179 | 0 |
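# A minimal re-implementation sketch of the ContextManagers helper being tested above
# (an assumption about its behavior, inferred from the assertions): it enters the given
# context managers in order and exits them in reverse, like contextlib.ExitStack.
from contextlib import ExitStack

class ContextManagersSketch:
    def __init__(self, managers):
        self.managers = managers
        self.stack = ExitStack()

    def __enter__(self):
        for manager in self.managers:
            self.stack.enter_context(manager)
        return self

    def __exit__(self, *exc):
        self.stack.__exit__(*exc)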
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
    # Assumes `nums` is sorted in ascending order.
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 701 |
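# Note: the two-pointer scan above only works on sorted input; for unsorted input a
# hash map gives the same answer in O(n). Sketch (names are illustrative):
def two_sum_hash(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []

assert two_sum_hash([11, 2, 15, 7], 9) == [1, 3]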
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool ):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )

    def encode( self , image: "Image" , question: str ):
        return self.pre_processor(image , question , return_tensors='pt' )

    def forward( self , inputs ):
        with torch.no_grad():
            return self.model(**inputs ).logits

    def decode( self , outputs ):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 154 | 0 |
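# Hedged usage sketch (the checkpoint download and the PIL image path are assumptions):
# a PipelineTool chains encode -> forward -> decode, so calling the tool directly with
# an image and a question returns the predicted answer string.
# from PIL import Image
# tool = ImageQuestionAnsweringTool()
# answer = tool(image=Image.open("photo.jpg"), question="What color is the car?")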
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}

class Speech2TextTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs=None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )

    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )

    def set_tgt_lang_special_tokens( self , tgt_lang: str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )

    def _convert_id_to_token( self , index: int ) -> str:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix=None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder , vocab_save_path )

        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (str(vocab_save_path ), str(spm_save_path ))

def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm

def load_json( path: str ) -> Union[Dict, List]:
    with open(path , "r" ) as f:
        return json.load(f )

def save_json( data , path: str ) -> None:
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
| 400 |
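# Hedged usage sketch (the checkpoint name comes from the constants above; audio
# feature extraction is out of scope here): decoding with do_upper_case=True restores
# the casing convention of the LibriSpeech transcripts.
# tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
# ids = tokenizer("hello world").input_ids
# text = tokenizer.decode(ids, skip_special_tokens=True)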
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput ):
    sample: torch.FloatTensor

class Encoder(nn.Module ):
def __init__( self , a__=3 , a__=3 , a__=("DownEncoderBlock2D",) , a__=(64,) , a__=2 , a__=32 , a__="silu" , a__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ = layers_per_block
        snake_case_ = torch.nn.Conv2d(
a__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case_ = None
snake_case_ = nn.ModuleList([] )
# down
snake_case_ = block_out_channels[0]
for i, down_block_type in enumerate(a__ ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(a__ ) - 1
snake_case_ = get_down_block(
a__ , num_layers=self.layers_per_block , in_channels=a__ , out_channels=a__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a__ , resnet_groups=a__ , attention_head_dim=a__ , temb_channels=a__ , )
self.down_blocks.append(a__ )
# mid
        snake_case_ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=a__ , temb_channels=a__ , )
# out
snake_case_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a__ , eps=1e-6 )
snake_case_ = nn.SiLU()
snake_case_ = 2 * out_channels if double_z else out_channels
        snake_case_ = nn.Conv2d(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
snake_case_ = False
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = x
snake_case_ = self.conv_in(a__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a__ ):
def custom_forward(*a__ ):
return module(*a__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
snake_case_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(a__ ) , a__ , use_reentrant=a__ )
# middle
snake_case_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a__ , use_reentrant=a__ )
else:
for down_block in self.down_blocks:
snake_case_ = torch.utils.checkpoint.checkpoint(create_custom_forward(a__ ) , a__ )
# middle
snake_case_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a__ )
else:
# down
for down_block in self.down_blocks:
snake_case_ = down_block(a__ )
# middle
snake_case_ = self.mid_block(a__ )
# post-process
snake_case_ = self.conv_norm_out(a__ )
snake_case_ = self.conv_act(a__ )
snake_case_ = self.conv_out(a__ )
return sample
class Decoder(nn.Module ):
def __init__( self , a__=3 , a__=3 , a__=("UpDecoderBlock2D",) , a__=(64,) , a__=2 , a__=32 , a__="silu" , a__="group" , ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ = layers_per_block
        snake_case_ = nn.Conv2d(
a__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case_ = None
snake_case_ = nn.ModuleList([] )
snake_case_ = in_channels if norm_type == "spatial" else None
# mid
        snake_case_ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a__ , temb_channels=a__ , )
# up
snake_case_ = list(reversed(a__ ) )
snake_case_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a__ ):
snake_case_ = output_channel
snake_case_ = reversed_block_out_channels[i]
snake_case_ = i == len(a__ ) - 1
snake_case_ = get_up_block(
a__ , num_layers=self.layers_per_block + 1 , in_channels=a__ , out_channels=a__ , prev_output_channel=a__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a__ , resnet_groups=a__ , attention_head_dim=a__ , temb_channels=a__ , resnet_time_scale_shift=a__ , )
self.up_blocks.append(a__ )
snake_case_ = output_channel
# out
if norm_type == "spatial":
snake_case_ = SpatialNorm(block_out_channels[0] , a__ )
else:
snake_case_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a__ , eps=1e-6 )
snake_case_ = nn.SiLU()
        snake_case_ = nn.Conv2d(block_out_channels[0] , a__ , 3 , padding=1 )
snake_case_ = False
def lowerCAmelCase__ ( self , a__ , a__=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = z
snake_case_ = self.conv_in(a__ )
snake_case_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a__ ):
def custom_forward(*a__ ):
return module(*a__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
snake_case_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a__ , a__ , use_reentrant=a__ )
snake_case_ = sample.to(a__ )
# up
for up_block in self.up_blocks:
snake_case_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(a__ ) , a__ , a__ , use_reentrant=a__ )
else:
# middle
snake_case_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a__ , a__ )
snake_case_ = sample.to(a__ )
# up
for up_block in self.up_blocks:
snake_case_ = torch.utils.checkpoint.checkpoint(create_custom_forward(a__ ) , a__ , a__ )
else:
# middle
snake_case_ = self.mid_block(a__ , a__ )
snake_case_ = sample.to(a__ )
# up
for up_block in self.up_blocks:
snake_case_ = up_block(a__ , a__ )
# post-process
if latent_embeds is None:
snake_case_ = self.conv_norm_out(a__ )
else:
snake_case_ = self.conv_norm_out(a__ , a__ )
snake_case_ = self.conv_act(a__ )
snake_case_ = self.conv_out(a__ )
return sample
class VectorQuantizer(nn.Module ):
def __init__( self , a__ , a__ , a__ , a__=None , a__="random" , a__=False , a__=True ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ = n_e
snake_case_ = vq_embed_dim
snake_case_ = beta
snake_case_ = legacy
snake_case_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case_ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
snake_case_ = self.used.shape[0]
snake_case_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case_ = self.re_embed
snake_case_ = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
snake_case_ = n_e
snake_case_ = sane_index_shape
def lowerCAmelCase__ ( self , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = inds.shape
assert len(a__ ) > 1
snake_case_ = inds.reshape(ishape[0] , -1 )
snake_case_ = self.used.to(a__ )
snake_case_ = (inds[:, :, None] == used[None, None, ...]).long()
snake_case_ = match.argmax(-1 )
snake_case_ = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case_ = self.unknown_index
return new.reshape(a__ )
def lowerCAmelCase__ ( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = inds.shape
assert len(a__ ) > 1
snake_case_ = inds.reshape(ishape[0] , -1 )
snake_case_ = self.used.to(a__ )
if self.re_embed > self.used.shape[0]: # extra token
snake_case_ = 0 # simply set to zero
snake_case_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a__ )
return back.reshape(a__ )
def lowerCAmelCase__ ( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case_ = torch.argmin(torch.cdist(a__ , self.embedding.weight ) , dim=1 )
snake_case_ = self.embedding(a__ ).view(z.shape )
snake_case_ = None
snake_case_ = None
# compute loss for embedding
if not self.legacy:
snake_case_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case_ = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case_ = self.remap_to_used(a__ )
snake_case_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCAmelCase__ ( self , a__ , a__ ) -> List[str]:
'''simple docstring'''
if self.remap is not None:
snake_case_ = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case_ = self.unmap_to_all(a__ )
snake_case_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case_ = self.embedding(a__ )
if shape is not None:
snake_case_ = z_q.view(a__ )
# reshape back to match original input shape
snake_case_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution:
def __init__( self , a__ , a__=False ) -> Optional[int]:
'''simple docstring'''
snake_case_ = parameters
snake_case_ , snake_case_ = torch.chunk(a__ , 2 , dim=1 )
snake_case_ = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
snake_case_ = deterministic
snake_case_ = torch.exp(0.5 * self.logvar )
snake_case_ = torch.exp(self.logvar )
if self.deterministic:
snake_case_ = snake_case_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCAmelCase__ ( self , a__ = None ) -> torch.FloatTensor:
'''simple docstring'''
snake_case_ = randn_tensor(
self.mean.shape , generator=a__ , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case_ = self.mean + self.std * sample
return x
def lowerCAmelCase__ ( self , a__=None ) -> List[str]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCAmelCase__ ( self , a__ , a__=[1, 2, 3] ) -> Optional[int]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
snake_case_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return self.mean
| 400 | 1 |
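# A worked sketch of the reparameterization used by the diagonal Gaussian above
# (shapes are illustrative): sample = mean + std * eps keeps sampling differentiable,
# and the KL against a standard normal reduces to 0.5 * sum(mu^2 + var - 1 - logvar).
import torch

mean, logvar = torch.zeros(1, 4), torch.zeros(1, 4)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
sample = mean + std * eps
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar)
print(kl)  # 0 for a standard-normal posterior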
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_base_tokenizer( self ):
"""simple docstring"""
UpperCamelCase__ = PLBartTokenizer(lowerCamelCase_ , language_codes="base" , keep_accents=lowerCamelCase_ )
UpperCamelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
UpperCamelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCamelCase__ = tokenizer.vocab_size
UpperCamelCase__ = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 4 , lowerCamelCase_ )]
self.assertListEqual(lowerCamelCase_ , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCamelCase__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCamelCase__ = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) , lowerCamelCase_ , )
    def test_full_multi_tokenizer( self ):
"""simple docstring"""
UpperCamelCase__ = PLBartTokenizer(lowerCamelCase_ , language_codes="multi" , keep_accents=lowerCamelCase_ )
UpperCamelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
UpperCamelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCamelCase__ = tokenizer.vocab_size
UpperCamelCase__ = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 7 , lowerCamelCase_ )]
self.assertListEqual(
lowerCamelCase_ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCamelCase__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCamelCase__ = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) , lowerCamelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase ):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
    tgt_text = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
    expected_src_tokens = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass( cls ):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        cls.pad_token_id = 1
        return cls
def lowerCamelCase__ ( self :Any ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCamelCase__ ( self :Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def lowerCamelCase__ ( self :str ) -> List[Any]:
"""simple docstring"""
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
UpperCamelCase__ = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
UpperCamelCase__ = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
UpperCamelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def lowerCamelCase__ ( self :int ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , lowerCamelCase_ )
UpperCamelCase__ = 1_0
UpperCamelCase__ = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCamelCase__ ( self :Union[str, Any] ) -> int:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCamelCase__ ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
UpperCamelCase__ = PLBartTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def lowerCamelCase__ ( self :Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors="pt" )
UpperCamelCase__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase__ ( self :Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCamelCase__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase__ ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors="pt" )
UpperCamelCase__ = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=1_0 , return_tensors="pt" )
UpperCamelCase__ = targets["input_ids"]
UpperCamelCase__ = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCamelCase__ ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , ) | 304 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A : List[str] = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PIL.Image.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , rescale_factor: Union[int, float] = 1 / 255 , do_rescale: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PIL.Image.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample=None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 304 | 1 |
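# Hedged usage sketch (the class name ImageProcessor is a generic stand-in restored
# during cleanup, and the random uint8 array stands in for a real photo): preprocess
# resizes to `size`, center-crops to `crop_size`, rescales to [0, 1] and normalizes,
# returning a BatchFeature keyed by "pixel_values" with shape (batch, 3, 224, 224).
# processor = ImageProcessor()
# batch = processor.preprocess(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8), return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 224, 224)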
def print_pascal_triangle( num_rows: int ) -> None:
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=" " )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=" " )
            else:
                print(triangle[row_idx][col_idx] , end="" )
        print()

def generate_pascal_triangle( num_rows: int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError("The input value of 'num_rows' should be 'int'" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0" )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle

def populate_current_row( triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row

def calculate_current_element( triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt

def generate_pascal_triangle_optimized( num_rows: int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError("The input value of 'num_rows' should be 'int'" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0" )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result

def benchmark( ) -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup="import __main__" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""" )

    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
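# A quick, self-contained illustration of the two generators above (a sketch
# added for clarity; not part of the original module):
if __name__ == "__main__":
    assert generate_pascal_triangle(3) == [[1], [1, 1], [1, 2, 1]]
    assert generate_pascal_triangle_optimized(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]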
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
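# The n & (n - 1) expression above clears the lowest set bit, so the result is
# zero exactly when n has a single set bit. A small demonstration (added sketch):
if __name__ == "__main__":
    for n in (1, 2, 16, 18, 1024):
        print(n, bin(n), is_power_of_two(n))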
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '


DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
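# Usage sketch for the helper above (assumes the reconstructed name
# `download_prompt`; fetching from a repo ID needs network access to the Hub):
#
#     template = download_prompt(None, agent_name="my-agent", mode="run")
#     prompt = template.replace("<<task>>", "Translate this text to French.")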
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    '''simple docstring'''
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
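# Note: the routine above performs len(data) random transpositions. The textbook
# Fisher-Yates (Knuth) shuffle instead walks the list once, swapping each position
# with a random earlier-or-equal index - a sketch for comparison:
def knuth_fisher_yates_shuffle(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data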
import random


def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
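# Example: quick_select returns the k-th smallest element (0-indexed), so the
# median of an odd-length list is the middle index (added sketch):
if __name__ == "__main__":
    values = [7, 1, 5, 3, 9]
    print(quick_select(values, len(values) // 2))  # 5, the median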
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
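# Usage sketch for the tokenizer above (hedged example - loading the published
# checkpoint requires network access to the Hub):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     ids = tokenizer(" Hello world")["input_ids"]
#     print(tokenizer.decode(ids))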
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def a (self : List[str] ):
"""simple docstring"""
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a (self : Any ):
"""simple docstring"""
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(observed) < len(reference):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
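# A small standalone sketch of the skip helpers exercised above (hedged example;
# requires torch and accelerate to be installed):
#
#     from torch.utils.data import DataLoader
#     from accelerate.data_loader import skip_first_batches
#
#     dl = DataLoader(list(range(16)), batch_size=4)
#     resumed = skip_first_batches(dl, num_batches=2)  # resumes mid-epoch
#     print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]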
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
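# Migration sketch: new code should instantiate the image processor directly
# instead of the deprecated feature extractor (hedged example; checkpoint name
# is illustrative):
#
#     from transformers import DeiTImageProcessor
#
#     image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")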
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    '''simple docstring'''

    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = self.get_image_processor()
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
processor_slow.save_pretrained(self.tmpdirname )
A_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=a__ )
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
processor_fast.save_pretrained(self.tmpdirname )
A_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a__ )
self.assertIsInstance(processor_fast.tokenizer , a__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a__ )
self.assertIsInstance(processor_fast.image_processor , a__ )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
A_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
A_ = self.get_image_processor(do_normalize=a__ )
A_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=a__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
A_ = self.prepare_image_inputs()
A_ = image_processor(a__ , return_tensors='''np''' )
A_ = processor(images=a__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
A_ = '''Alexandra,T-shirt的价格是15便士。'''
A_ = processor(text=a__ )
A_ = tokenizer(a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
A_ = '''Alexandra,T-shirt的价格是15便士。'''
A_ = self.prepare_image_inputs()
A_ = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(a__ )
A_ = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
A_ = '''Alexandra,T-shirt的价格是15便士。'''
A_ = self.prepare_image_inputs()
A_ = processor(text=a__ , images=a__ )
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self , _lowercase=10 , _lowercase=0.01 , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=True , _lowercase="image" , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , ):
'''simple docstring'''
super().__init__()
__a : Any = None
__a : Tuple = device if device else get_device()
if vqgan:
__a : Optional[Any] = vqgan
else:
__a : List[Any] = load_vqgan(self.device , conf_path=_lowercase , ckpt_path=_lowercase )
self.vqgan.eval()
if clip:
__a : str = clip
else:
__a : Dict = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
__a : Any = ProcessorGradientFlow(device=self.device )
__a : Any = iterations
__a : Optional[Any] = lr
__a : Optional[Any] = log
__a : Dict = make_grid
__a : Dict = return_val
__a : Tuple = quantize
__a : Optional[Any] = self.vqgan.decoder.z_shape
def lowerCAmelCase__(self , _lowercase=None , _lowercase=None , _lowercase=5 , _lowercase=True ):
'''simple docstring'''
__a : str = []
if output_path is None:
__a : Optional[int] = """./animation.gif"""
if input_path is None:
__a : List[str] = self.save_path
__a : Tuple = sorted(glob(input_path + """/*""" ) )
if not len(_lowercase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(_lowercase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
__a : Union[str, Any] = total_duration / len(_lowercase )
__a : Optional[Any] = [frame_duration] * len(_lowercase )
if extend_frames:
__a : Optional[int] = 1.5
__a : str = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(_lowercase ) )
imageio.mimsave(_lowercase , _lowercase , duration=_lowercase )
print(F'''gif saved to {output_path}''' )
def lowerCAmelCase__(self , _lowercase=None , _lowercase=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
__a : Any = preprocess(Image.open(_lowercase ) , target_image_size=256 ).to(self.device )
__a : str = preprocess_vqgan(_lowercase )
__a , *__a : List[str] = self.vqgan.encode(_lowercase )
return z
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[str] = self.latent.detach().requires_grad_()
__a : Optional[Any] = base_latent + transform_vector
if self.quantize:
__a , *__a : str = self.vqgan.quantize(_lowercase )
else:
__a : str = trans_latent
return self.vqgan.decode(_lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=None ):
'''simple docstring'''
__a : Tuple = self.clip_preprocessor(text=_lowercase , images=_lowercase , return_tensors="""pt""" , padding=_lowercase )
__a : Any = self.clip(**_lowercase )
__a : List[str] = clip_outputs.logits_per_image
if weights is not None:
__a : Optional[int] = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Any = self._get_clip_similarity(pos_prompts["""prompts"""] , _lowercase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
__a : Tuple = self._get_clip_similarity(neg_prompts["""prompts"""] , _lowercase , weights=neg_prompts["""weights"""] )
else:
__a : Any = torch.tensor([1] , device=self.device )
__a : Dict = -torch.log(_lowercase ) + torch.log(_lowercase )
return loss
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[int] = torch.randn_like(self.latent , requires_grad=_lowercase , device=self.device )
__a : List[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__a : int = self._add_vector(_lowercase )
__a : Any = loop_post_process(_lowercase )
__a : Dict = self._get_CLIP_loss(_lowercase , _lowercase , _lowercase )
print("""CLIP loss""" , _lowercase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=_lowercase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
wandb.init(reinit=_lowercase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
__a : Optional[int] = Image.open(_lowercase )
__a : Union[str, Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(_lowercase ) )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if not prompts:
return []
__a : Optional[Any] = []
__a : Optional[Any] = []
if isinstance(_lowercase , _lowercase ):
__a : Dict = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(_lowercase , (tuple, list) ):
__a : str = prompt[0]
__a : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
__a , __a : Optional[int] = prompt.split(""":""" )
__a : Any = float(_lowercase )
else:
__a : Dict = prompt
__a : List[str] = 1.0
processed_prompts.append(_lowercase )
weights.append(_lowercase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_lowercase , device=self.device ),
}
def lowerCAmelCase__(self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase=None , ):
'''simple docstring'''
if image_path:
__a : int = self._get_latent(_lowercase )
else:
__a : Optional[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_lowercase , _lowercase , _lowercase )
assert pos_prompts, "You must provide at least one positive prompt."
__a : Union[str, Any] = self.process_prompts(_lowercase )
__a : Optional[Any] = self.process_prompts(_lowercase )
if save_final and save_path is None:
__a : Dict = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(_lowercase ):
os.makedirs(_lowercase )
else:
__a : Union[str, Any] = save_path + """_""" + get_timestamp()
os.makedirs(_lowercase )
__a : Tuple = save_path
__a : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(_lowercase ) )
__a : Optional[int] = loop_post_process(_lowercase )
for iter, transformed_img in enumerate(self._optimize_CLIP(_lowercase , _lowercase , _lowercase ) ):
if show_intermediate:
show_pil(_lowercase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(_lowercase )} )
if show_final:
show_pil(_lowercase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
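# With the lazy-module pattern above, heavy submodules are only imported when an
# attribute is first accessed. A hedged usage sketch (requires torch installed):
#
#     from transformers import ErnieConfig, ErnieModel
#
#     config = ErnieConfig()
#     model = ErnieModel(config)  # triggers the actual import of modeling_ernie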
| 63 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
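# Usage sketch for the config above (hedged - mirrors the standard
# PretrainedConfig pattern; requires transformers installed):
#
#     config = BitConfig(embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], out_features=["stage4"])
#     print(config.model_type)   # "bit"
#     print(config.stage_names)  # ["stem", "stage1", "stage2", "stage3", "stage4"]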
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, visited, rec_stk, node)
        for node in graph
    )


def depth_first_search(graph: dict, visited: set, rec_stk: set, vertex) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, visited, rec_stk, node):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
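# Demonstration graphs (added sketch): 0 -> 1 -> 2 -> 0 forms a cycle, while the
# second graph is acyclic.
if __name__ == "__main__":
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True
    print(check_cycle({0: [1], 1: [2], 2: []}))  # False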
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 274 |
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    """Sort the list ``a`` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
| 274 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 232 |
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    """
    Check whether every element of ``input_list`` appears only once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
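    # Illustrative extra checks beyond the doctests above:
    print(all_unique([1, 2, 3, 4]))  # True
    print(all_unique("aab"))  # False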
| 43 | 0 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    """Round ``h`` and ``w`` up to multiples of ``scale_factor`` (returned in latent units)."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
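# Quick sanity check of the rounding above (illustrative comments only, not executed):
#   get_new_h_w(512, 512, scale_factor=8) -> (64, 64)   # 512 is already a multiple of 8**2
#   get_new_h_w(500, 500, scale_factor=8) -> (64, 64)   # 500 is rounded up to the next multiple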
class KandinskyPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using Kandinsky."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device on which the pipeline's models will be executed."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: torch.FloatTensor,
        negative_image_embeds: torch.FloatTensor,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 716 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 458 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
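# Illustrative usage sketch (names assumed; one second of silence at 48 kHz):
#   extractor = ClapFeatureExtractor()
#   features = extractor(np.zeros(48_000, dtype=np.float64), sampling_rate=48_000, return_tensors="pt")
#   With the default "fusion" truncation, features["input_features"] stacks 4 mel views per clip,
#   as the np.stack calls in _get_input_mel above show.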
| 147 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 33 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ =["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
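# Illustrative usage sketch (assuming the standard `transformers.pipeline` factory and a CLIP checkpoint):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])  # -> [{"score": ..., "label": ...}, ...]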
| 690 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 167 | from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the current worldwide COVID-19 counters from worldometers."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 424 | 0 |
import functools
def edit_distance(word1: str, word2: str) -> int:
    """
    Minimum number of single-character insertions, deletions or substitutions
    needed to turn ``word1`` into ``word2`` (Levenshtein distance).

    >>> edit_distance("kitten", "sitting")
    3
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
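    # Illustrative check (classic Levenshtein example): "intention" -> "execution" needs 5 edits.
    print(edit_distance("intention", "execution"))  # 5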
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 279 | 0 |
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
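    # For the default total of 200 pence, the published Project Euler 31 answer
    # is 73682 ways (stated here as a sanity-check comment, not computed above).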
| 13 |
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below ``max_number`` using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Count hybrid-integers p^q * q^p (p, q distinct primes) not exceeding
    base**degree, by comparing logarithms (q*log(p) + p*log(q)) instead of
    the huge powers themselves.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self) -> None:
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 393 |
"""simple docstring"""
import pytest
SCREAMING_SNAKE_CASE__ = "__dummy_dataset1__"
SCREAMING_SNAKE_CASE__ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
# Bind the long dummy-dataset script literal above to the name the fixtures use.
DATASET_LOADING_SCRIPT_CODE = SCREAMING_SNAKE_CASE__


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 393 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A_ , f.name )
SCREAMING_SNAKE_CASE__ = XGLMTokenizer(f.name , keep_accents=A_ )
SCREAMING_SNAKE_CASE__ = pickle.dumps(A_ )
pickle.loads(A_ )
def lowercase_ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''Hello World!'''
SCREAMING_SNAKE_CASE__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
SCREAMING_SNAKE_CASE__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {
'''input_ids''': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
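# A short sanity check mirroring the slow tests above (downloads the real
# vocab on first use; `fairseq_offset` is 1 for XGLM, shifting raw
# SentencePiece ids so control tokens occupy the first positions).
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
print(tok.fairseq_offset)          # 1
print(tok.encode("Hello World!"))  # [2, 31227, 4447, 35], as asserted above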
| 100 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
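# Minimal usage sketch for the tool above (import path assumed from the
# `tools` module this file lives in; the SAMSum-finetuned BART checkpoint is
# downloaded on first use).
from transformers.tools import TextSummarizationTool

summarizer = TextSummarizationTool()
dialogue = "Sam: I finished the report. Alex: Great, send it over and we'll review it tomorrow."
print(summarizer(dialogue))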
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
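# Quick sanity check of the config above (the class is exported as
# `transformers.NllbMoeConfig`); `hidden_size` resolves to `d_model`
# through `attribute_map`.
from transformers import NllbMoeConfig

config = NllbMoeConfig(num_experts=8, moe_token_dropout=0.1)
print(config.hidden_size)  # 1024 (alias of d_model)
print(config.num_experts)  # 8
print(config.model_type)   # "nllb-moe"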
| 35 |
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None,
        num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True,
        callback=None, callback_steps=1, **kwargs,
    ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None,
        num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True,
        callback=None, callback_steps=1, **kwargs,
    ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None,
        num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True,
        callback=None, callback_steps=1, **kwargs,
    ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None,
        num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True,
        callback=None, callback_steps=1, **kwargs,
    ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def _compare(
        self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None,
        num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True,
        callback=None, callback_steps=1, **kwargs,
    ):
        # entry point: runs the same prompt through all four v1.x checkpoints
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
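# Loading sketch as a diffusers community pipeline -- the `custom_pipeline`
# identifier and the `_compare` entry-point name are assumptions from the
# reconstruction above; all four v1.x checkpoints are downloaded, so this is
# heavy and effectively requires a GPU.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
)
pipe.enable_attention_slicing()
output = pipe._compare(prompt="an astronaut riding a horse on mars")
output.images  # one image per checkpoint, v1-1 through v1-4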
| 35 | 1 |
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 436 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
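# Quick demonstration of the matcher above.
for s in ["{[()]}", "([)]", "", "((("]:
    print(repr(s), "->", is_balanced(s))
# '{[()]}' -> True, '([)]' -> False, '' -> True, '(((' -> False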
| 402 | 0 |
class MaxFenwickTree:
    """Fenwick-style tree supporting point updates and range-max queries in O(log^2 n)."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # this node covers only `index` itself
                self.tree[index] = value
            else:
                # recompute the max over the covered range [current_left_border, index]
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (`right` is exclusive)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
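# Small demonstration of the tree above.
tree = MaxFenwickTree(8)
tree.update(2, 10)
tree.update(5, 7)
print(tree.query(0, 8))  # 10 (max over the whole array)
print(tree.query(3, 8))  # 7  (index 2 is excluded)
tree.update(2, 1)        # lowering a value works because update recomputes the range max
print(tree.query(0, 8))  # 7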
| 703 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
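# A few quick checks of the XOR sign trick above.
assert different_signs(1, -1) is True
assert different_signs(-1, 1) is True
assert different_signs(3, 7) is False
assert different_signs(-5, -2) is False
assert different_signs(0, -1) is True  # zero's sign bit is 0, so it pairs as "positive"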
| 65 | 0 |
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
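# Minimal end-to-end sketch; the path and content are illustrative. The
# `--correct_filename` format -- `file;class;test;corrected line` -- is
# inferred from the `line.split(";")` above.
corrections = (
    "tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;"
    "expected_slice = torch.tensor([0.1, 0.2, 0.3])\n"
)
with open("corrections.txt", "w") as f:
    f.write(corrections)

main("corrections.txt")  # rewrites the matching assignment in the listed test file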
| 14 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
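# Usage sketch: diffusers invokes this converter internally when a Flax model
# is loaded from a PyTorch checkpoint; the model choice below is illustrative.
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", from_pt=True
)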
| 618 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
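# Typical usage of the Auto class above: the checkpoint's config resolves to
# the concrete processor class via IMAGE_PROCESSOR_MAPPING.
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(processor).__name__)  # ViTImageProcessor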
| 242 |
def z_function(input_str: str) -> list[int]:
    """For each index, the length of the longest substring starting there that
    is also a prefix of the whole string."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
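# Quick demonstration of the Z-function and the pattern counter above.
print(z_function("abracadabra"))            # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
print(find_pattern("abra", "abracadabra"))  # 2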
| 242 | 1 |