"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BartphoTokenizer
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : List[Any] = True
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : str = ['▁This', '▁is', '▁a', '▁t', 'est']
lowerCamelCase__ : Dict = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file, 'w', encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
lowerCamelCase__ : Optional[Any] = BartphoTokenizer(lowerCamelCase_, self.monolingual_vocab_file, **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = 'This is a là test'
lowerCamelCase__ : Any = 'This is a<unk><unk> test'
return input_text, output_text
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = BartphoTokenizer(lowerCamelCase_, self.monolingual_vocab_file, **self.special_tokens_map )
lowerCamelCase__ : List[str] = 'This is a là test'
lowerCamelCase__ : Dict = '▁This ▁is ▁a ▁l à ▁t est'.split()
lowerCamelCase__ : int = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCamelCase__ : str = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
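# Note on the expected ids (a reading of the test, not from the source): the
# tokenizer lays out the vocab fairseq-style — <s>=0, <pad>=1, </s>=2, <unk>=3,
# then the monolingual tokens from index 4 — so "▁l" and "à", which are absent
# from the 5-token vocab above, both map to <unk> (id 3).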
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task, distributing work across GPUs via the accelerator."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCamelCase__ : ClassVar[Features] = Features({'text': Value('string' )} )
lowerCamelCase__ : ClassVar[Features] = Features({'summary': Value('string' )} )
lowerCamelCase__ : str = "text"
lowerCamelCase__ : str = "summary"
@property
def a__ (self ):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
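# A minimal usage sketch (the column names are illustrative, not from the source):
#
#   template = Summarization(text_column="article", summary_column="highlights")
#   template.column_mapping  # -> {"article": "text", "highlights": "summary"}
#
# i.e. the template records how a dataset's own columns map onto the canonical
# "text"/"summary" schema declared in input_schema/label_schema.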
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
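# Sketch of the behavior: importing these classes always succeeds, but
# instantiating one without the backend installed raises an ImportError from
# requires_backends that explains how to install the missing "speech" dependency.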
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 10**-10 ):
lowerCamelCase__ : Optional[int] = a
while True:
lowerCamelCase__ : Any = Decimal(_lowerCamelCase ) - (
Decimal(eval(_lowerCamelCase ) ) / Decimal(eval(str(diff(_lowerCamelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_lowerCamelCase ) ) < precision: # noqa: S307
return float(_lowerCamelCase )
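# The loop above implements the classic update x_{n+1} = x_n - f(x_n) / f'(x_n);
# for example, f(x) = x**2 - 5*x + 2 started at x_0 = 0.4 converges to the
# nearby root (5 - sqrt(17)) / 2 ≈ 0.438447.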
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
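# Quick sanity checks (easy to verify by hand):
#   factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27
#   solution() with the default of 100 gives 648, the published answer to
#   Project Euler problem 20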
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : str = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
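# How this behaves in practice: at runtime the module object is replaced by a
# _LazyModule, so an import like `from transformers import BridgeTowerModel`
# only triggers the (torch-dependent) submodule import on first attribute
# access, and merely importing the package stays cheap when torch or vision
# is absent.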
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
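    # The ids above follow the tokenizer's byte-level scheme: each UTF-8 byte is
    # offset by the number of special tokens (6, so e.g. " " = 32 + 6 = 38), with
    # 4 ([CLS]) and 5 ([SEP]) wrapping the sequence; "€" spans three bytes
    # (0xE2 0x82 0xAC -> 232, 136, 178).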
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
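    # Why this holds: 178 - 6 = 172 (0xAC) is a UTF-8 continuation byte — the
    # last byte of "€" — so it is not decodable on its own and decodes to the
    # U+FFFD replacement character.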
    # tokenizer can be instantiated without any pretrained files, so no pretrained list to check
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized: ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # would test all ids in the vocab, but there is no vocabulary file
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which
        # can only accept one-character strings and special added tokens as tokens.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowerCamelCase__ : list = []
for char_count in range(_lowerCamelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
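# prints "AXBYZ": characters alternate while both strings last, then the
# leftover tail of the longer string is appended.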
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
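# Sanity check for Heron's formula above: the 5-12-13 right triangle gives
# s = 15 and sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30, matching (5 * 12) / 2.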
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
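    # Rationale for the checks above: control_guidance_start/end bound the
    # fraction of denoising steps during which each ControlNet is active (a
    # scalar is broadcast to both nets, a list sets a per-net window), so each
    # configuration should steer the sampler differently.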
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
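        # Rationale: this exercises the KV cache — running only the new tokens
        # together with past_key_values must reproduce (within atol) the hidden
        # states obtained from a full forward pass over the whole sequence.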
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
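# Illustrative sketch, separate from the test class above: building the same
# RoPE-scaling dict the linear/dynamic test constructs, on a deliberately tiny,
# randomly initialised GPT-NeoX. Assumes a transformers version with
# `rope_scaling` support; all sizes are made-up toy values.
from transformers import GPTNeoXConfig, GPTNeoXModel
tiny_config = GPTNeoXConfig(
    vocab_size=128,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=8,
    intermediate_size=128,
    rope_scaling={"type": "dynamic", "factor": 10.0},  # same dict shape the test uses
)
tiny_model = GPTNeoXModel(tiny_config).eval()  # random weights, illustration only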
| 696 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class a_ ( snake_case_ ):
'''simple docstring'''
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
lowerCamelCase__ : Union[str, Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
lowerCamelCase__ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
lowerCamelCase__ : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase_ )
BertModel.from_pretrained(lowerCamelCase_ )
BertTokenizer.from_pretrained(lowerCamelCase_ )
pipeline(task='fill-mask', model=lowerCamelCase_ )
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : Dict = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
lowerCamelCase__ : Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ : List[Any] = '1'
lowerCamelCase__ : List[Any] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
lowerCamelCase__ : Dict = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
lowerCamelCase__ : Union[str, Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
lowerCamelCase__ : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase_ )
BertModel.from_pretrained(lowerCamelCase_ )
BertTokenizer.from_pretrained(lowerCamelCase_ )
pipeline(task='fill-mask', model=lowerCamelCase_ )
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
lowerCamelCase__ : Union[str, Any] = self.get_env()
lowerCamelCase__ : List[Any] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
lowerCamelCase__ : int = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
lowerCamelCase__ : List[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
lowerCamelCase__ : List[Any] = self.get_env()
lowerCamelCase__ : int = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
# next emulate no network
lowerCamelCase__ : Optional[int] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ : List[str] = '1'
lowerCamelCase__ : str = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = '\nfrom transformers import pipeline\n '
lowerCamelCase__ : int = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
lowerCamelCase__ : Union[str, Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
lowerCamelCase__ : List[str] = self.get_env()
lowerCamelCase__ : List[str] = '1'
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
lowerCamelCase__ : Any = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 1, result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode', result.stderr.decode().replace('\n', '' ), )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = '\nfrom transformers import AutoModel\n '
lowerCamelCase__ : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
lowerCamelCase__ : int = self.get_env()
lowerCamelCase__ : Optional[Any] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ : List[str] = '1'
lowerCamelCase__ : Optional[int] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
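# Minimal sketch of the pattern the tests above exercise: run a child Python
# process with TRANSFORMERS_OFFLINE=1 and check that a previously cached model
# loads with no network access. The repo name matches the tests; the files must
# already sit in the local cache for the child process to print "success".
import os
import subprocess
import sys
child_code = (
    "from transformers import BertConfig; "
    "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert'); "
    "print('success')"
)
offline_env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
offline_result = subprocess.run([sys.executable, "-c", child_code], env=offline_env, capture_output=True)
print(offline_result.returncode)  # 0 once the config is cached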
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
            lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
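# Minimal demo of what the checkpoint regex above extracts, applied to a
# made-up one-line docstring in the usual `[name](https://huggingface.co/name)` form.
import re
_re_ckpt_demo = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
demo_doc = "similar to that of the [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture."
print(_re_ckpt_demo.findall(demo_doc))
# -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]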
| 696 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
A_ : Union[str, Any] = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = None ):
lowerCamelCase__ : Optional[Any] = f'''\n{hint}''' if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = requirement, None, None
else:
lowerCamelCase__ : List[str] = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , _lowerCamelCase )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
f''' got {requirement}''' )
lowerCamelCase__ : List[str] = match[0]
lowerCamelCase__ : List[Any] = want_full.split(',' ) # there could be multiple requirements
lowerCamelCase__ : Any = {}
for w in want_range:
lowerCamelCase__ : Optional[Any] = re.findall(r'^([\s!=<>]{1,2})(.+)' , _lowerCamelCase )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
f''' but got {requirement}''' )
lowerCamelCase__ : List[str] = match[0]
lowerCamelCase__ : List[str] = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
lowerCamelCase__ : str = '.'.join([str(_lowerCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return
# check if any version is installed
try:
lowerCamelCase__ : int = importlib.metadata.version(_lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(_lowerCamelCase , _lowerCamelCase )
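# Usage sketch for the helpers above. The canonical copy lives in
# `transformers.utils.versions` under the name `require_version`; the calls
# below show the three accepted requirement shapes.
from transformers.utils.versions import require_version
require_version("packaging")                                    # presence check only
require_version("packaging>=20.0", "pip install packaging -U")  # pinned, with a hint
require_version("python>=3.7")                                  # compared against sys.version_info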
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 0 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCamelCase_ ( _lowerCamelCase ):
'''simple docstring'''
if not is_accelerate_available():
return method
lowerCamelCase__ : int = version.parse(accelerate.__version__ ).base_version
if version.parse(_lowerCamelCase ) < version.parse('0.17.0' ):
return method
def wrapper(self , *_lowerCamelCase , **_lowerCamelCase ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *_lowerCamelCase , **_lowerCamelCase )
return wrapper
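# Usage sketch: diffusers exports the decorator defined above as
# `apply_forward_hook`. Without an attached accelerate `_hf_hook` the wrapper
# is a transparent no-op, so the toy class behaves like the undecorated one.
from diffusers.utils.accelerate_utils import apply_forward_hook
class TinyCodec:
    @apply_forward_hook
    def decode(self, x):
        return x * 2
print(TinyCodec().decode(3))  # 6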
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    A_ : List[str] = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 6008_5147_5143 ):
try:
lowerCamelCase__ : Tuple = int(_lowerCamelCase )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : str = 2
while i * i <= n:
while n % i == 0:
lowerCamelCase__ : Dict = i
n //= i
i += 1
if n > 1:
lowerCamelCase__ : List[str] = n
return int(_lowerCamelCase )
if __name__ == "__main__":
print(f"{solution() = }")
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
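# Minimal sketch of the multi-device input plumbing the tests above repeat:
# replicate() copies pytree leaves across devices, shard() folds the batch axis
# into (num_devices, batch_per_device, ...), and the PRNG key is split so each
# device draws independent noise. All shapes below are toy values.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
demo_params = {"w": jnp.ones((4, 4))}
demo_batch = jnp.zeros((jax.device_count() * 2, 8), dtype=jnp.int32)
demo_params = replicate(demo_params)  # leading device axis on every leaf
demo_batch = shard(demo_batch)  # (devices, per_device_batch, 8)
demo_rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
print(demo_batch.shape, demo_rngs.shape)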
| 696 | 0 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : List[Any] = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'bart'
lowerCamelCase__ : Dict = ['past_key_values']
lowerCamelCase__ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, lowerCamelCase_=5_0_2_6_5, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_="gelu", lowerCamelCase_=1_0_2_4, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=0.0, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=3, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=True, lowerCamelCase_=2, lowerCamelCase_=2, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = d_model
lowerCamelCase__ : Dict = encoder_ffn_dim
lowerCamelCase__ : str = encoder_layers
lowerCamelCase__ : Any = encoder_attention_heads
lowerCamelCase__ : List[str] = decoder_ffn_dim
lowerCamelCase__ : int = decoder_layers
lowerCamelCase__ : str = decoder_attention_heads
lowerCamelCase__ : Optional[Any] = dropout
lowerCamelCase__ : Union[str, Any] = attention_dropout
lowerCamelCase__ : Any = activation_dropout
lowerCamelCase__ : List[str] = activation_function
lowerCamelCase__ : List[str] = init_std
lowerCamelCase__ : List[Any] = encoder_layerdrop
lowerCamelCase__ : Tuple = decoder_layerdrop
lowerCamelCase__ : int = classifier_dropout
lowerCamelCase__ : str = use_cache
lowerCamelCase__ : Optional[Any] = encoder_layers
lowerCamelCase__ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowerCamelCase_, pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, forced_eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', lowerCamelCase_ ):
lowerCamelCase__ : str = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowerCamelCase__ : Optional[int] = {0: 'batch'}
lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
lowerCamelCase__ : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_, direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Dict = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowerCamelCase__ : Dict = self.num_layers
for i in range(lowerCamelCase_ ):
lowerCamelCase__ : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
lowerCamelCase__ : Optional[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
lowerCamelCase__ : Tuple = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def a__ (self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : List[str] = super().outputs
else:
lowerCamelCase__ : Optional[int] = super(lowerCamelCase_, self ).outputs
if self.use_past:
lowerCamelCase__ : int = self.num_layers
for i in range(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = {0: 'batch', 2: 'past_sequence + sequence'}
lowerCamelCase__ : Any = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# Generate decoder inputs
lowerCamelCase__ : str = seq_length if not self.use_past else 1
lowerCamelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : int = dict(**lowerCamelCase_, **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase__ : Union[str, Any] = common_inputs['input_ids'].shape
lowerCamelCase__ : Optional[Any] = common_inputs['decoder_input_ids'].shape[1]
lowerCamelCase__ : Optional[Any] = self.num_attention_heads
lowerCamelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Any = decoder_seq_length + 3
lowerCamelCase__ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Optional[Any] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCamelCase_, lowerCamelCase_ )], dim=1 )
lowerCamelCase__ : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ : Union[str, Any] = self.num_layers
lowerCamelCase__ : Any = min(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = max(lowerCamelCase_, lowerCamelCase_ ) - min_num_layers
lowerCamelCase__ : Optional[int] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
lowerCamelCase__ : str = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCamelCase_, lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase__ : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Dict = seqlen + 2
lowerCamelCase__ : Union[str, Any] = self.num_layers
lowerCamelCase__ : Optional[int] = self.num_attention_heads
lowerCamelCase__ : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : List[str] = common_inputs['attention_mask'].dtype
lowerCamelCase__ : List[str] = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCamelCase_, lowerCamelCase_, dtype=lowerCamelCase_ )], dim=1 )
lowerCamelCase__ : List[Any] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = compute_effective_axis_dimension(
lowerCamelCase_, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = compute_effective_axis_dimension(
lowerCamelCase_, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : List[Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Tuple = dict(tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_ ) )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
elif self.task == "causal-lm":
lowerCamelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
else:
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : str = super()._flatten_past_key_values_(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
else:
lowerCamelCase__ : Any = super(lowerCamelCase_, self )._flatten_past_key_values_(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
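# Illustrative sketch, assuming the public names this file defines in
# transformers (BartConfig / BartOnnxConfig): inspecting the exported axes for
# the default task with no past key/values.
from transformers import BartConfig
from transformers.models.bart.configuration_bart import BartOnnxConfig
bart_onnx_cfg = BartOnnxConfig(BartConfig(), task="default")
print(list(bart_onnx_cfg.inputs))
# -> ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']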
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s greater than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
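# Usage sketch: the pipeline above mirrors diffusers' DanceDiffusionPipeline
# interface (a UNet plus scheduler, driven by audio_length_in_s and
# num_inference_steps). The checkpoint name is illustrative and the call
# downloads real weights.
from diffusers import DanceDiffusionPipeline
demo_pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
demo_output = demo_pipe(audio_length_in_s=4.0, num_inference_steps=50)
demo_waveform = demo_output.audios[0]  # numpy array of shape (channels, samples)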
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'yolos'
def __init__(self, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=[5_1_2, 8_6_4], lowerCamelCase_=1_6, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=1_0_0, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=1, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=0.1, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : Tuple = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Union[str, Any] = layer_norm_eps
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : List[Any] = qkv_bias
lowerCamelCase__ : List[Any] = num_detection_tokens
lowerCamelCase__ : List[str] = use_mid_position_embeddings
lowerCamelCase__ : int = auxiliary_loss
# Hungarian matcher
lowerCamelCase__ : Tuple = class_cost
lowerCamelCase__ : Union[str, Any] = bbox_cost
lowerCamelCase__ : List[Any] = giou_cost
# Loss coefficients
lowerCamelCase__ : Union[str, Any] = bbox_loss_coefficient
lowerCamelCase__ : Union[str, Any] = giou_loss_coefficient
lowerCamelCase__ : str = eos_coefficient
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = version.parse('1.11' )
@property
def a__ (self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
@property
def a__ (self ):
'''simple docstring'''
return 1_2
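# Illustrative sketch, assuming the public names this file defines in
# transformers (YolosConfig / YolosOnnxConfig): the export spec pins a single
# dynamic image input with a loose validation tolerance.
from transformers import YolosConfig
from transformers.models.yolos.configuration_yolos import YolosOnnxConfig
yolos_onnx_cfg = YolosOnnxConfig(YolosConfig())
print(dict(yolos_onnx_cfg.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
print(yolos_onnx_cfg.atol_for_validation, yolos_onnx_cfg.default_onnx_opset)  # 0.0001 12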
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
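# Quick illustration of the filename helper exercised in the last test above
# (signature as in transformers.convert_graph_to_onnx):
from pathlib import Path
from transformers.convert_graph_to_onnx import generate_identified_filename
print(generate_identified_filename(Path("model.onnx"), "-quantized"))  # model-quantized.onnx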
| 696 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
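# Illustrative sketch (not part of the test class above, helper names hypothetical):
# the byte-level scheme these tests exercise maps each UTF-8 byte to byte_value + 6,
# reserving ids 0-5 for special tokens with [CLS] = 4 and [SEP] = 5, which is why
# 'Unicode €.' encodes to [4, 91, ..., 52, 5] above and why a lone id can fail to
# decode: a bare UTF-8 continuation byte is not valid text on its own.
def byte_encode(text, offset=6):
    return [4] + [b + offset for b in text.encode('utf-8')] + [5]
def byte_decode(ids, offset=6):
    payload = bytes(i - offset for i in ids if i >= offset)
    return payload.decode('utf-8', errors='replace')
assert byte_encode('Unicode €.')[1] == 91
assert byte_decode(byte_encode('Unicode €.')) == 'Unicode €.'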
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
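# Self-contained helper sketch: the hint preprocessing used in the slow test above,
# factored out with readable names. It scales a PIL image into [0, 1] and reorders
# HWC -> 1CHW, the layout the controlnet pipeline expects for its `hint` argument.
import numpy as np
import torch
from PIL import Image
def image_to_hint(img):
    arr = torch.from_numpy(np.array(img)).float() / 255.0  # HWC floats in [0, 1]
    return arr.permute(2, 0, 1).unsqueeze(0)  # -> (1, C, H, W)
hint = image_to_hint(Image.new('RGB', (64, 64)))
assert hint.shape == (1, 3, 64, 64)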
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = len(_lowerCamelCase )
while cur > 1:
# Find the maximum number in arr
lowerCamelCase__ : str = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCamelCase__ : List[str] = arr[mi::-1] + arr[mi + 1 : len(_lowerCamelCase )]
        # Reverse the first cur elements so the maximum lands in its final position
lowerCamelCase__ : Any = arr[cur - 1 :: -1] + arr[cur : len(_lowerCamelCase )]
cur -= 1
return arr
if __name__ == "__main__":
A_ : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
A_ : Tuple = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
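# Quick property check (self-contained restatement with readable names): pancake sort
# only ever reverses prefixes, so comparing against sorted() over random inputs is a
# cheap correctness probe; worst case is O(n^2) comparisons and at most 2n - 3 flips.
import random
def prefix_reverse_sort(arr):
    for size in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:size]))
        arr = arr[mi::-1] + arr[mi + 1 :]  # flip the maximum to the front
        arr = arr[size - 1 :: -1] + arr[size:]  # flip it into its final slot
    return arr
for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert prefix_reverse_sort(list(data)) == sorted(data)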
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
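# Illustrative consumer (hypothetical helper, mirroring how setup.py-style files use
# such a table): map bare package names back to their full pinned requirement strings.
# `A_` is the dependency table defined above.
def deps_list(*pkgs):
    return [A_[pkg] for pkg in pkgs]
assert deps_list('torch', 'tqdm') == ['torch>=1.9,!=1.12.0', 'tqdm>=4.27']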
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 3 , _lowerCamelCase = 7 , _lowerCamelCase = 100_0000 ):
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = 1
for current_denominator in range(1 , limit + 1 ):
lowerCamelCase__ : Optional[int] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
lowerCamelCase__ : List[str] = current_numerator
lowerCamelCase__ : Optional[int] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
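# Self-contained restatement with readable names, plus worked checks: among all
# fractions n/d with d <= limit that are strictly less than numerator/denominator,
# return the numerator of the largest one. For 3/7 with limit 8 that is 2, since
# 2/5 = 0.4 is the closest fraction left of 3/7 ~ 0.4286; with limit 10**6 it is 428570.
def left_neighbour_numerator(numerator=3, denominator=7, limit=1_000_000):
    best_n, best_d = 0, 1
    for d in range(1, limit + 1):
        n = d * numerator // denominator
        if d % denominator == 0:  # n/d equals the target exactly; step one unit left
            n -= 1
        if n * best_d > d * best_n:  # n/d > best_n/best_d, compared without floats
            best_n, best_d = n, d
    return best_n
assert left_neighbour_numerator(3, 7, 8) == 2
assert left_neighbour_numerator(3, 7, 1_000_000) == 428570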
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
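# Hedged end-to-end sketch with readable names: a plain modular-exponentiation
# exchange over the group-14 parameters from the table above (`A_`), hashed with
# SHA-256 exactly as the class methods do. Illustrative only, not a vetted
# implementation.
from hashlib import sha256
from secrets import randbelow
p, g = A_[14]['prime'], A_[14]['generator']
a = 2 + randbelow(p - 3)  # Alice's private exponent
b = 2 + randbelow(p - 3)  # Bob's private exponent
A, B = pow(g, a, p), pow(g, b, p)  # public keys, exchanged in the clear
assert sha256(str(pow(B, a, p)).encode()).hexdigest() == sha256(str(pow(A, b, p)).encode()).hexdigest()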
| 696 | 0 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
A_ : Any = "__DUMMY_TRANSFORMERS_USER__"
A_ : Tuple = "Dummy User"
A_ : str = "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"
A_ : List[str] = "https://hub-ci.huggingface.co"
A_ : Optional[int] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
A_ : int = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
A_ : Any = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _lowerCamelCase )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
HfFolder.save_token(_lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( ):
return HfApi(endpoint=_lowerCamelCase )
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = f'''repo_txt_data-{int(time.time() * 10e3 )}'''
lowerCamelCase__ : Any = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo='data/text_data.txt' , repo_id=_lowerCamelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = f'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
lowerCamelCase__ : int = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo='data.zip' , repo_id=_lowerCamelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = f'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
lowerCamelCase__ : Union[str, Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo='data.zip' , repo_id=_lowerCamelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return hf_private_dataset_repo_zipped_img_data_
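# Hypothetical consumer sketch (assuming the fixtures keep their upstream names such
# as `hf_private_dataset_repo_txt_data`, `hf_api` and `hf_token`): pytest injects
# fixtures by argument name, so a test simply declares the repo and client it needs.
def test_private_repo_is_reachable(hf_private_dataset_repo_txt_data, hf_api, hf_token):
    info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    assert info.id == hf_private_dataset_repo_txt_data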
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
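# Worked example (self-contained restatement): kinetic energy is 0.5 * m * |v|^2, so
# the sign of the velocity never matters; a 10 kg body at +/-10 m/s carries 500 J.
def kinetic_energy(mass, velocity):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(10, -10) == 500.0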
| 696 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
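# Hedged mini-demo of the round-trip the update_from_string test above exercises,
# using the upstream class name GPT2Config: values are parsed from "key=value" pairs
# and cast to the type of the existing field, so int/float/bool/str all survive.
from transformers import GPT2Config
c = GPT2Config()
c.update_from_string('n_embd=1024,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index_foo')
assert (c.n_embd, c.resid_pdrop, c.scale_attn_weights, c.summary_type) == (1024, 0.2, False, 'cls_index_foo')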
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Dict = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'levit'
def __init__(self, lowerCamelCase_=2_2_4, lowerCamelCase_=3, lowerCamelCase_=3, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=1_6, lowerCamelCase_=[1_2_8, 2_5_6, 3_8_4], lowerCamelCase_=[4, 8, 1_2], lowerCamelCase_=[4, 4, 4], lowerCamelCase_=[1_6, 1_6, 1_6], lowerCamelCase_=0, lowerCamelCase_=[2, 2, 2], lowerCamelCase_=[2, 2, 2], lowerCamelCase_=0.02, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : Union[str, Any] = kernel_size
lowerCamelCase__ : str = stride
lowerCamelCase__ : Union[str, Any] = padding
lowerCamelCase__ : Any = hidden_sizes
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : str = depths
lowerCamelCase__ : Any = key_dim
lowerCamelCase__ : List[str] = drop_path_rate
lowerCamelCase__ : Dict = patch_size
lowerCamelCase__ : Optional[Any] = attention_ratio
lowerCamelCase__ : Any = mlp_ratio
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : int = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = version.parse('1.11' )
@property
def a__ (self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
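# Hedged instantiation sketch (upstream class name LevitConfig): the defaults above
# describe LeViT-128S, and larger variants are expressed by widening the three stages,
# e.g. LeViT-192 with hidden_sizes [192, 288, 384] and num_attention_heads [3, 5, 6].
from transformers import LevitConfig, LevitModel
config = LevitConfig(hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6])
model = LevitModel(config)  # randomly initialized; weights are not pretrained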
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
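# Minimal sketch of the guidance step at the core of run_diffusion above:
# differentiate a scalar value estimate w.r.t. the trajectory and nudge the sample
# along the gradient, scaled by exp(0.5 * posterior_variance) as in the loop above.
# `value_fn` is a stand-in for the value-function network.
import torch
def guide_step(x, value_fn, posterior_variance, scale=0.1):
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        grad = torch.autograd.grad(value_fn(x).sum(), x)[0]
    model_std = torch.exp(0.5 * posterior_variance)
    return (x + scale * model_std * grad).detach()
x = torch.randn(4, 32, 14)  # (batch, horizon, state_dim + action_dim)
x = guide_step(x, lambda t: t.pow(2).mean(dim=(1, 2)), posterior_variance=torch.tensor(0.25))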
| 696 | 0 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCamelCase_ ( _lowerCamelCase = True , *_lowerCamelCase , **_lowerCamelCase ):
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
lowerCamelCase__ : int = False
if main_process_only:
        lowerCamelCase__ : Optional[Any] = PartialState().local_process_index != 0  # show the bar only on the local main process
return _tqdm(*_lowerCamelCase , **_lowerCamelCase , disable=_lowerCamelCase )
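# Hedged usage sketch (assuming this wrapper ships as `accelerate.utils.tqdm`, its
# upstream home): under `accelerate launch` with several processes, only the local
# main process renders the bar; pass main_process_only=False to show it on every rank.
from accelerate.utils import tqdm
for _ in tqdm(True, range(10)):
    pass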
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total character count, used to turn per-character counts into probabilities
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
    # entropy accumulator for single-character strings
    lowerCamelCase__ : str = 0
    # for each alphabet character that occurs in the text, add its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two-character strings
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
    # for each two-character sequence that occurs in the text, add its entropy contribution
for cha in my_alphas:
for cha in my_alphas:
lowerCamelCase__ : int = cha + cha
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # edge case: count the virtual space preceding the first character
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
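# Worked example (self-contained): for a text over the alphabet {a, b} with equal
# counts, the single-character Shannon entropy is -(0.5 * log2(0.5) + 0.5 * log2(0.5))
# = 1.0 bit, which is the quantity the first loop above accumulates (up to sign).
import math
counts = {'a': 2, 'b': 2}
total = sum(counts.values())
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
assert entropy == 1.0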
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=1_9, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : str = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Dict = use_token_type_ids
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : str = type_vocab_size
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Dict = num_labels
lowerCamelCase__ : str = num_choices
lowerCamelCase__ : Any = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : int = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : Any = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = EsmConfig(
vocab_size=3_3, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=lowerCamelCase_, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False}, )
return config
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = EsmForProteinFolding(config=lowerCamelCase_ ).float()
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Any = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
lowerCamelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : str = config_and_inputs
lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : List[str] = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCamelCase__ : Union[str, Any] = ()
lowerCamelCase__ : Any = {} if is_torch_available() else {}
lowerCamelCase__ : str = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = EsmFoldModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip('Does not support attention outputs' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold only has one output format.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support input chunking.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a__ (self ):
'''simple docstring'''
pass
@require_torch
class a_ ( snake_case_ ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
lowerCamelCase__ : Optional[Any] = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )['positions']
lowerCamelCase__ : Optional[int] = torch.tensor([2.5_828, 0.7_993, -10.9_334], dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], lowerCamelCase_, atol=1e-4 ) )
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
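# Worked example from the Project Euler 22 statement: "COLIN" has the
# alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at position 938 of the
# sorted list it contributes 938 * 53 = 49714 to the total name score.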
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
    lowerCamelCase__ : list[list[int]] = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase + 1 )]
    for i in range(_lowerCamelCase + 1 ):
        memo[i][0] = 1  # base case
    for n in range(_lowerCamelCase + 1 ):
        for k in range(1 , _lowerCamelCase ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[_lowerCamelCase][_lowerCamelCase - 1]
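# Sanity check, assuming the helper above is exposed as ``partition`` (the CLI
# block below already calls it by that name): the integer 5 has exactly seven
# partitions (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1), so:
#
#     partition(5)  # -> 7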
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
A_ : Any = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
A_ : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
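# Minimal usage sketch (illustrative only; the waveform variable is assumed to be
# a 16 kHz audio array, and the checkpoint name matches a published Speech2Text
# model):
#
#     from transformers import Speech2TextProcessor
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcription")
#     # `inputs` holds the extracted audio features; in the upstream processor the
#     # tokenized text is attached under the "labels" key.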
| 696 | 0 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
A_ : List[str] = data_utils.TransfoXLTokenizer
A_ : Tuple = data_utils.TransfoXLCorpus
A_ : Any = data_utils
A_ : List[Any] = data_utils
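# In the upstream conversion script these aliases are also registered in
# ``sys.modules`` so that unpickling can resolve the legacy module paths
# (sketch, assuming the same module layout):
#
#     sys.modules["data_utils"] = data_utils
#     sys.modules["vocabulary"] = data_utils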
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_lowerCamelCase , 'rb' ) as fp:
lowerCamelCase__ : str = pickle.load(_lowerCamelCase , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
lowerCamelCase__ : str = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
lowerCamelCase__ : str = corpus.vocab.__dict__
torch.save(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[str] = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
lowerCamelCase__ : Optional[int] = os.path.abspath(_lowerCamelCase )
lowerCamelCase__ : Any = os.path.abspath(_lowerCamelCase )
print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
lowerCamelCase__ : int = TransfoXLConfig()
else:
lowerCamelCase__ : Optional[int] = TransfoXLConfig.from_json_file(_lowerCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase__ : int = TransfoXLLMHeadModel(_lowerCamelCase )
lowerCamelCase__ : int = load_tf_weights_in_transfo_xl(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Save PyTorch model to {os.path.abspath(_lowerCamelCase )}''' )
torch.save(model.state_dict() , _lowerCamelCase )
print(f'''Save configuration file to {os.path.abspath(_lowerCamelCase )}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
A_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
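        # Worked example of the ratio-based check: with an expected value of 1.0e8
        # and a model output of 1.00001e8 the ratio is ~0.99999, well inside
        # [1 - TOLERANCE, 1 + TOLERANCE], whereas the absolute difference is 1e3
        # and would need a tolerance tuned to each magnitude.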
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
    if not isinstance(_lowerCamelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        lowerCamelCase__ : List[Any] = str(abs(_lowerCamelCase ) )
        lowerCamelCase__ : Any = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
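# Example: for 2736 the candidates after deleting one digit are 736, 236, 276
# and 273, so the function returns 736; the sign is discarded via abs(), so
# -2736 yields the same result.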
if __name__ == "__main__":
__import__("doctest").testmod()
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
    lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(EOF_STRINGS ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = torch.exp(_lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    lowerCamelCase__ : Any = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
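# Worked check of the closed form above: H(softmax(x)) = log(sum_i exp(x_i))
# - sum_i x_i * exp(x_i) / sum_i exp(x_i). For a logit row [0, 0] this gives
# log(2) - 0/2 = log(2) ≈ 0.693, the entropy of a uniform two-class
# distribution; the highway exits below compare this value to a threshold.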
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : int = config.output_attentions
lowerCamelCase__ : str = config.output_hidden_states
lowerCamelCase__ : List[Any] = nn.ModuleList([BertLayer(lowerCamelCase_ ) for _ in range(config.num_hidden_layers )] )
lowerCamelCase__ : Dict = nn.ModuleList([BertHighway(lowerCamelCase_ ) for _ in range(config.num_hidden_layers )] )
lowerCamelCase__ : str = [-1 for _ in range(config.num_hidden_layers )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if (type(lowerCamelCase_ ) is float) or (type(lowerCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
lowerCamelCase__ : str = x
else:
lowerCamelCase__ : str = x
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a__ (self, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = ()
lowerCamelCase__ : int = ()
lowerCamelCase__ : Dict = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
lowerCamelCase__ : Tuple = all_hidden_states + (hidden_states,)
lowerCamelCase__ : Any = layer_module(
lowerCamelCase_, lowerCamelCase_, head_mask[i], lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = layer_outputs[0]
if self.output_attentions:
lowerCamelCase__ : int = all_attentions + (layer_outputs[1],)
lowerCamelCase__ : Union[str, Any] = (hidden_states,)
if self.output_hidden_states:
lowerCamelCase__ : Optional[Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowerCamelCase__ : str = current_outputs + (all_attentions,)
lowerCamelCase__ : str = self.highway[i](lowerCamelCase_ )
# logits, pooled_output
if not self.training:
lowerCamelCase__ : Optional[int] = highway_exit[0]
lowerCamelCase__ : List[str] = entropy(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowerCamelCase__ : List[str] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowerCamelCase__ : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowerCamelCase_, i + 1 )
else:
lowerCamelCase__ : Tuple = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowerCamelCase__ : int = all_hidden_states + (hidden_states,)
lowerCamelCase__ : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
lowerCamelCase__ : Any = outputs + (all_hidden_states,)
if self.output_attentions:
lowerCamelCase__ : int = outputs + (all_attentions,)
lowerCamelCase__ : Optional[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
lowerCamelCase__ : str = config
lowerCamelCase__ : Dict = BertEmbeddings(lowerCamelCase_ )
lowerCamelCase__ : Any = DeeBertEncoder(lowerCamelCase_ )
lowerCamelCase__ : Tuple = BertPooler(lowerCamelCase_ )
self.init_weights()
def a__ (self ):
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def a__ (self ):
'''simple docstring'''
return self.embeddings.word_embeddings
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = value
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowerCamelCase_ )
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCamelCase__ : Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
lowerCamelCase__ : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCamelCase__ : Dict = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCamelCase__ : str = torch.ones(lowerCamelCase_, device=lowerCamelCase_ )
if encoder_attention_mask is None:
lowerCamelCase__ : Any = torch.ones(lowerCamelCase_, device=lowerCamelCase_ )
if token_type_ids is None:
lowerCamelCase__ : Tuple = torch.zeros(lowerCamelCase_, dtype=torch.long, device=lowerCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCamelCase__ : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCamelCase__ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCamelCase__ : int = encoder_attention_mask[:, None, None, :]
lowerCamelCase__ : Tuple = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
lowerCamelCase__ : Union[str, Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
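        # e.g. an encoder_attention_mask of [[1, 1, 0]] becomes
        # [[[[0.0, 0.0, -10000.0]]]] here: masked positions get a large negative
        # bias that drives their attention weights to ~0 after the softmax.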
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCamelCase__ : Optional[int] = self.get_head_mask(lowerCamelCase_, self.config.num_hidden_layers )
lowerCamelCase__ : Tuple = self.embeddings(
input_ids=lowerCamelCase_, position_ids=lowerCamelCase_, token_type_ids=lowerCamelCase_, inputs_embeds=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.encoder(
lowerCamelCase_, attention_mask=lowerCamelCase_, head_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, )
lowerCamelCase__ : List[str] = encoder_outputs[0]
lowerCamelCase__ : Tuple = self.pooler(lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = message
lowerCamelCase__ : Tuple = exit_layer # start from 1!
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[Any] = BertPooler(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCamelCase__ : Optional[Any] = nn.Linear(config.hidden_size, config.num_labels )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = encoder_outputs[0]
lowerCamelCase__ : Union[str, Any] = self.pooler(lowerCamelCase_ )
# "return" pooler_output
# BertModel
lowerCamelCase__ : Optional[Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCamelCase__ : List[Any] = bmodel_output[1]
lowerCamelCase__ : str = self.dropout(lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.classifier(lowerCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = config.num_labels
lowerCamelCase__ : Dict = config.num_hidden_layers
lowerCamelCase__ : Any = DeeBertModel(lowerCamelCase_ )
lowerCamelCase__ : Dict = nn.Dropout(config.hidden_dropout_prob )
lowerCamelCase__ : List[Any] = nn.Linear(config.hidden_size, self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=-1, lowerCamelCase_=False, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.num_layers
try:
lowerCamelCase__ : int = self.bert(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, position_ids=lowerCamelCase_, head_mask=lowerCamelCase_, inputs_embeds=lowerCamelCase_, )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCamelCase__ : Optional[Any] = outputs[1]
lowerCamelCase__ : List[Any] = self.dropout(lowerCamelCase_ )
lowerCamelCase__ : Dict = self.classifier(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCamelCase__ : int = e.message
lowerCamelCase__ : List[Any] = e.exit_layer
lowerCamelCase__ : Optional[int] = outputs[0]
if not self.training:
lowerCamelCase__ : Dict = entropy(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Optional[int] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCamelCase__ : List[str] = MSELoss()
lowerCamelCase__ : Union[str, Any] = loss_fct(logits.view(-1 ), labels.view(-1 ) )
else:
lowerCamelCase__ : List[Any] = CrossEntropyLoss()
lowerCamelCase__ : Union[str, Any] = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
# work with highway exits
lowerCamelCase__ : Optional[int] = []
for highway_exit in outputs[-1]:
lowerCamelCase__ : str = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCamelCase__ : Union[str, Any] = MSELoss()
lowerCamelCase__ : Any = loss_fct(highway_logits.view(-1 ), labels.view(-1 ) )
else:
lowerCamelCase__ : Tuple = CrossEntropyLoss()
lowerCamelCase__ : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels ), labels.view(-1 ) )
highway_losses.append(lowerCamelCase_ )
if train_highway:
lowerCamelCase__ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCamelCase__ : Tuple = (loss,) + outputs
if not self.training:
lowerCamelCase__ : Any = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCamelCase__ : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase__ : str = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : int = torch.permute(_lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ):
# linear layer
lowerCamelCase__ : str = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : Dict = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
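# Example of the renaming above (the Flax layout is an assumption): a 2D linear
# kernel keyed (..., 'kernel') becomes (..., 'weight') with the tensor
# transposed to PyTorch's (out_features, in_features); a 3D expert kernel,
# presumably stored as (experts, in, out), is permuted to (experts, out, in).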
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if "metadata" in layer:
lowerCamelCase__ : List[Any] = layer.split('metadata' )
lowerCamelCase__ : List[Any] = ''.join(split_layer[0] )[:-1]
lowerCamelCase__ : Tuple = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
lowerCamelCase__ : Optional[Any] = layer.split('kvstore' )
lowerCamelCase__ : List[Any] = ''.join(split_layer[0] )[:-1]
lowerCamelCase__ : List[Any] = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
lowerCamelCase__ : Tuple = layer.split('/' )
lowerCamelCase__ : Dict = '/'.join(split_layer[:-1] )
lowerCamelCase__ : str = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase__ : str = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
lowerCamelCase__ : str = 'file'
else:
lowerCamelCase__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
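# Example of the parsing above (the layer key is hypothetical): for
# 'target/encoder/layer_0/mlp/kernel/kvstore/path' the real layer name becomes
# 'target/encoder/layer_0/mlp/kernel', the split suffix is ('kvstore', 'path'),
# and the content is rewritten to '<switch_checkpoint_path>/<stored path value>'.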
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = rename_keys(_lowerCamelCase )
lowerCamelCase__ : List[str] = {}
for k, v in current_block.items():
lowerCamelCase__ : List[str] = v
lowerCamelCase__ : Optional[Any] = new_current_block
torch.save(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ):
lowerCamelCase__ : List[Any] = convert_file_size_to_int(_lowerCamelCase )
lowerCamelCase__ : int = []
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Tuple = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
lowerCamelCase__ : Optional[int] = serialization.msgpack_restore(fp.read() )['optimizer']['target']
lowerCamelCase__ : Optional[int] = flatten_dict(_lowerCamelCase , sep='/' )
lowerCamelCase__ : Dict = {}
for layer in checkpoint_info.keys():
lowerCamelCase__ : Optional[Any] = get_key_and_tensorstore_dict(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if curr_real_layer_name in all_layers:
lowerCamelCase__ : Union[str, Any] = content
else:
lowerCamelCase__ : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase__ : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase__ : int = torch.tensor(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase__ : Optional[int] = rename_base_flax_keys(tuple(key.split('/' ) ) , _lowerCamelCase )
lowerCamelCase__ : List[Any] = '/'.join(_lowerCamelCase )
        # If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase__ : Any = os.path.join(
_lowerCamelCase , weights_name.replace('.bin' , f'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = raw_weights.to(getattr(_lowerCamelCase , _lowerCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase__ : Any = os.path.join(_lowerCamelCase , weights_name.replace('.bin' , f'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase__ : Tuple = {}
lowerCamelCase__ : str = {}
for idx, shard in enumerate(_lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = weights_name.replace(
            '.bin' , f'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
lowerCamelCase__ : int = os.path.join(_lowerCamelCase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : str = shard
for key in shard:
lowerCamelCase__ : Tuple = shard_file
# Add the metadata
lowerCamelCase__ : Union[str, Any] = {'total_size': total_size}
lowerCamelCase__ : Dict = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase__ : Any = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + '\n'
f.write(_lowerCamelCase )
return metadata, index
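# For reference: `convert_file_size_to_int` turns the human-readable limit into
# bytes (e.g. "10GB" -> 10 * 10**9) and `dtype_byte_size` gives the bytes per
# element (4 for float32), so the running `current_block_size` above is the
# shard size in bytes.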
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A_ : List[Any] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCamelCase_ ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase__ : List[str] = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
lowerCamelCase__ : Tuple = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
lowerCamelCase__ : Optional[int] = TaTokenizer.from_pretrained('t5-small' )
lowerCamelCase__ : Optional[int] = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
lowerCamelCase__ : str = tokenizer(_lowerCamelCase , return_tensors='pt' ).input_ids
lowerCamelCase__ : Tuple = model.generate(_lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowerCamelCase , 'r' ) as f:
lowerCamelCase__ : Optional[int] = f.readlines()
lowerCamelCase__ : Tuple = f'''class {class_name}('''
lowerCamelCase__ : Union[str, Any] = f'''{4 * ' '}def {test_name}('''
lowerCamelCase__ : int = f'''{8 * ' '}{correct_line.split()[0]}'''
lowerCamelCase__ : List[str] = f'''{16 * ' '}{correct_line.split()[0]}'''
lowerCamelCase__ : Any = False
lowerCamelCase__ : str = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : List[Any] = []
for line in lines:
if line.startswith(_lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = True
elif in_class and line.startswith(_lowerCamelCase ):
lowerCamelCase__ : Any = True
elif in_class and in_func and (line.startswith(_lowerCamelCase ) or line.startswith(_lowerCamelCase )):
lowerCamelCase__ : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCamelCase__ : str = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCamelCase__ : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * ' '}{correct_line}''' )
lowerCamelCase__ : Union[str, Any] = False
else:
new_lines.append(_lowerCamelCase )
with open(_lowerCamelCase , 'w' ) as f:
for line in new_lines:
f.write(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=None ):
if fail is not None:
with open(_lowerCamelCase , 'r' ) as f:
lowerCamelCase__ : List[str] = {l.strip() for l in f.readlines()}
else:
lowerCamelCase__ : Tuple = None
with open(_lowerCamelCase , 'r' ) as f:
lowerCamelCase__ : Optional[int] = f.readlines()
lowerCamelCase__ : Union[str, Any] = defaultdict(_lowerCamelCase )
for line in correct_lines:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
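# Editor's note: from the parsing above, each line of --correct_filename appears to be
# semicolon-separated, e.g. (hypothetical example):
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;        expected_slice = torch.tensor([...])
# while --fail_filename holds one "file::class::test" identifier per line.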
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
A_ : Dict = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
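# Editor's sketch (values inferred from the tests above; checkpoint download assumed):
# the Perceiver tokenizer is byte-level, so each UTF-8 byte maps to byte_value + 6
# (the special-token offset), with [CLS] = 4 and [SEP] = 5, e.g.:
#
#   tok = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
#   tok('hi').input_ids  # -> [4, 110, 111, 5]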
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A_ : Union[str, Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
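# Editor's note: with the _LazyModule above, importing the package does not load the
# torch- or TF-dependent modeling code; each submodule is imported on first attribute
# access, e.g. only when `TapasModel` (assumed public re-export) is actually touched.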
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
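# Editor's check (hand-worked): for radii 10 and 20 and height 30 the slant height is
# sqrt(30**2 + 10**2) = sqrt(1000) ≈ 31.62, giving pi * (31.62 * 30 + 100 + 400) ≈ 4551.1,
# which matches surface_area_conical_frustum(10, 20, 30) in the demo below.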
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
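# Editor's worked example of Heron's formula above: for sides 5, 12, 13 the semi-perimeter
# is 15, so the area is sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30 (a 5-12-13 right triangle).
assert area_triangle_three_sides(5, 12, 13) == 30.0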
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
elif length < 0:
raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A_ : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def lowerCamelCase_ ( _lowerCamelCase=None ):
if subparsers is not None:
lowerCamelCase__ : Dict = subparsers.add_parser('tpu-config' , description=_description )
else:
lowerCamelCase__ : str = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
lowerCamelCase__ : str = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_lowerCamelCase , default=_lowerCamelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_lowerCamelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_lowerCamelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
lowerCamelCase__ : Optional[int] = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_lowerCamelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase )
return parser
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCamelCase__ : int = defaults.command_file
if not args.command and defaults.commands is not None:
lowerCamelCase__ : List[str] = defaults.commands
if not args.tpu_name:
lowerCamelCase__ : List[Any] = defaults.tpu_name
if not args.tpu_zone:
lowerCamelCase__ : Any = defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCamelCase__ : Any = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
lowerCamelCase__ : int = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , _lowerCamelCase ):
lowerCamelCase__ : List[str] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
lowerCamelCase__ : Any = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _lowerCamelCase ):
lowerCamelCase__ : Dict = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCamelCase__ : List[str] = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
lowerCamelCase__ : int = '; '.join(_lowerCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCamelCase__ : str = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
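    # Editor's note: for illustration (names hypothetical), with --tpu_name my-tpu and
    # --tpu_zone us-central1-a this resolves to roughly:
    #   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
    #       --command "cd /usr/share; pip install accelerate -U; python train.py" --worker all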
if args.debug:
print(f'''Running {' '.join(_lowerCamelCase )}''' )
return
subprocess.run(_lowerCamelCase )
print('Successfully setup pod.' )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[int] = tpu_command_parser()
lowerCamelCase__ : str = parser.parse_args()
tpu_command_launcher(_lowerCamelCase )
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
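        # Editor's note: the dict built above corresponds to setting, e.g.,
        #   config.rope_scaling = {'type': 'linear', 'factor': 10.0}
        # before instantiating the model, which is how RoPE scaling is configured here.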
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
A_ : Optional[int] = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
A_ : List[str] = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
A_ : int = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), reference_urls=[], )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=False, ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCamelCase__ : Dict = np.array([re.sub(lowerCamelCase_, '', lowerCamelCase_ ) for x in predictions] )
lowerCamelCase__ : List[Any] = np.array([re.sub(lowerCamelCase_, '', lowerCamelCase_ ) for x in references] )
else:
lowerCamelCase__ : int = np.asarray(lowerCamelCase_ )
lowerCamelCase__ : str = np.asarray(lowerCamelCase_ )
if ignore_case:
lowerCamelCase__ : Optional[Any] = np.char.lower(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = np.char.lower(lowerCamelCase_ )
if ignore_punctuation:
lowerCamelCase__ : Dict = string.punctuation.maketrans('', '', string.punctuation )
lowerCamelCase__ : Any = np.char.translate(lowerCamelCase_, table=lowerCamelCase_ )
lowerCamelCase__ : str = np.char.translate(lowerCamelCase_, table=lowerCamelCase_ )
if ignore_numbers:
lowerCamelCase__ : Optional[int] = string.digits.maketrans('', '', string.digits )
lowerCamelCase__ : Union[str, Any] = np.char.translate(lowerCamelCase_, table=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = np.char.translate(lowerCamelCase_, table=lowerCamelCase_ )
lowerCamelCase__ : Any = predictions == references
return {"exact_match": np.mean(lowerCamelCase_ ) * 1_0_0}
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
        # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
        lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total count of single characters (the normalisation constant for the probabilities).
    lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
    # entropy accumulator for one-character strings
    lowerCamelCase__ : str = 0
    # for each alphabet character that occurs, accumulate its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
    # for each two-character sequence, accumulate its entropy contribution.
for cha in my_alphas:
for cha in my_alphas:
lowerCamelCase__ : int = cha + cha
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # seed the bigram counts as if the text started with a space.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
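# Editor's worked micro-example (hand-checked against the loop above): analyze_text('abb')
# returns single_char_strings == Counter({'b': 2, 'a': 1}) and
# two_char_strings == Counter({' a': 1, 'ab': 1, 'bb': 1}).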
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[int] = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'van'
def __init__(self, lowerCamelCase_=2_2_4, lowerCamelCase_=3, lowerCamelCase_=[7, 3, 3, 3], lowerCamelCase_=[4, 2, 2, 2], lowerCamelCase_=[6_4, 1_2_8, 3_2_0, 5_1_2], lowerCamelCase_=[3, 3, 1_2, 3], lowerCamelCase_=[8, 8, 4, 4], lowerCamelCase_="gelu", lowerCamelCase_=0.02, lowerCamelCase_=1e-6, lowerCamelCase_=1e-2, lowerCamelCase_=0.0, lowerCamelCase_=0.0, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Dict = patch_sizes
lowerCamelCase__ : Any = strides
lowerCamelCase__ : int = hidden_sizes
lowerCamelCase__ : List[str] = depths
lowerCamelCase__ : Dict = mlp_ratios
lowerCamelCase__ : Tuple = hidden_act
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = layer_scale_init_value
lowerCamelCase__ : List[Any] = drop_path_rate
lowerCamelCase__ : Dict = dropout_rate
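# Editor's sketch (class name as defined above; default values assumed from the signature):
#
#   config = a_()
#   config.hidden_sizes  # -> [64, 128, 320, 512]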
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(_lowerCamelCase ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
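# Editor's note: per the Project Euler 22 statement, COLIN (3+15+12+9+14 = 53) sits at
# sorted position 938 and contributes 938 * 53 = 49714 to the total; the names file is
# assumed to live next to this script.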
if __name__ == "__main__":
print(solution())
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
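# --- illustrative addition ---
# The replicate / split / shard idiom used in every test above is the standard
# jax.pmap data layout: parameters are copied to each device, while inputs and
# rng keys gain a leading device axis. A minimal shape-only sketch (no model):
def _pmap_layout_sketch():
    n = jax.device_count()
    rngs = jax.random.split(jax.random.PRNGKey(0), n)    # one key per device: (n, 2)
    batch = jnp.ones((n * 2, 8))                         # global batch
    sharded = batch.reshape((n, -1) + batch.shape[1:])   # what `shard` produces: (n, 2, 8)
    return rngs.shape, sharded.shape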
| 696 | 0 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[int] = "T5Config"
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = jnp.zeros_like(_lowerCamelCase )
lowerCamelCase__ : Tuple = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase__ : List[str] = shifted_input_ids.at[:, 0].set(_lowerCamelCase )
lowerCamelCase__ : int = jnp.where(shifted_input_ids == -100 , _lowerCamelCase , _lowerCamelCase )
return shifted_input_ids
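# --- illustrative addition ---
# Sketch of the shift above, assuming the three (obfuscated) parameters are, in
# order, input_ids, pad_token_id and decoder_start_token_id, as in the T5
# helper this mirrors:
def _shift_tokens_right_example():
    ids = jnp.array([[5, -100, 7, 8]])  # -100 marks an ignored label position
    shifted = jnp.zeros_like(ids).at[:, 1:].set(ids[:, :-1]).at[:, 0].set(1)
    return jnp.where(shifted == -100, 0, shifted)  # [[1, 5, 0, 7]]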
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'mt5'
lowerCamelCase__ : Optional[Any] = MTaConfig
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'mt5'
lowerCamelCase__ : List[Any] = MTaConfig
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'mt5'
lowerCamelCase__ : List[Any] = MTaConfig
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
            # 2. compute previous image: x_t -> x_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
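# --- illustrative addition ---
# Worked sketch of the length rounding performed in __call__ above; the config
# values are assumptions chosen so that the rounding actually triggers:
def _sample_size_rounding_example() -> int:
    sample_rate, audio_length_in_s, num_up_blocks = 16000, 4.03125, 3
    sample_size = int(audio_length_in_s * sample_rate)  # 64500
    down_scale_factor = 2 ** num_up_blocks  # 8
    if sample_size % down_scale_factor != 0:
        sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
    return sample_size  # 64504; the generated audio is cut back to 64500 samples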
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        (
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
        ) : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
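# --- illustrative addition ---
# Toy sketch of the relative-bound check used in the integration test above:
# with values spanning ~1e0 to ~1e8 an absolute tolerance is meaningless, so
# the ratio expected / observed is bounded around 1 instead.
def _relative_bound_sketch() -> bool:
    expected = torch.tensor([1.0e8, 2.5, -3.0e4])
    observed = expected * 1.0005  # within the 1e-3 relative tolerance
    ratio = expected / observed
    return bool(torch.all(ratio >= 1 - TOLERANCE)) and bool(torch.all(ratio <= 1 + TOLERANCE))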
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
        # Should have exactly one arg (everything before the missing "some_other_args" argument)
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
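# --- illustrative addition ---
# Direct-use sketch of the helpers exercised above; the paths are illustrative
# and the positional order follows the calls made in `_test_export`:
#   convert("pt", "bert-base-cased", Path("/tmp/bert/model.onnx"), 12, None)
#   quantized_path = quantize(Path("/tmp/bert/model.onnx"))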
| 696 | 0 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
A_ : List[str] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
            'Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
lowerCamelCase__ : int = os.path.abspath(_lowerCamelCase )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
lowerCamelCase__ : Union[str, Any] = torch.load(_lowerCamelCase , map_location='cpu' )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
lowerCamelCase__ : Dict = convert_pytorch_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCamelCase__ : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
return flax_state_dict
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_lowerCamelCase ) -> bool:
return len(set(_lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCamelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase__ : List[Any] = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase__ : int = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__ : List[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
lowerCamelCase__ : Tuple = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__ : List[str] = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__ : int = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase__ : Union[str, Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase__ : Tuple = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase__ : Dict = pt_tuple_key[-2] + '_v'
if name is not None:
lowerCamelCase__ : Any = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
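# --- illustrative addition ---
# How the renaming above maps typical PyTorch keys to Flax keys (toy key names
# chosen for illustration; the rule that fires depends on tensor rank and on
# which keys exist in the random Flax state dict):
#   ("layer_norm", "weight")  ->  ("layer_norm", "scale")                    (LayerNorm)
#   ("dense", "weight")       ->  ("dense", "kernel"), tensor transposed     (2-D linear)
#   ("conv", "weight")        ->  ("conv", "kernel"), transpose(2, 3, 1, 0)  (4-D conv)
#   ("embed", "weight")       ->  ("embed", "embedding")                     (embedding)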
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase__ : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase__ : int = flax_model.params['params']
else:
lowerCamelCase__ : Union[str, Any] = flax_model.params
lowerCamelCase__ : str = flatten_dict(_lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase__ : Dict = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase__ : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ : str = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowerCamelCase__ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__ : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase__ : List[Any] = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
lowerCamelCase__ : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__ : Optional[int] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase__ : str = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : Optional[Any] = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : str = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
import torch
# Load the index
lowerCamelCase__ : List[str] = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase__ : List[Any] = torch.load(_lowerCamelCase )
lowerCamelCase__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase__ : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase__ : Optional[int] = flax_model.params['params']
lowerCamelCase__ : List[Any] = flatten_dict(_lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
lowerCamelCase__ : int = flax_model.params
lowerCamelCase__ : str = flatten_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase__ : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ : Any = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowerCamelCase__ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase__ : Dict = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
lowerCamelCase__ : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__ : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase__ : List[str] = jnp.asarray(_lowerCamelCase )
continue
if "var" in flax_key[-1]:
lowerCamelCase__ : Tuple = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : Dict = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : List[Any] = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ : Any = os.path.abspath(_lowerCamelCase )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowerCamelCase__ : int = getattr(_lowerCamelCase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCamelCase , 'rb' ) as state_f:
try:
lowerCamelCase__ : Union[str, Any] = from_bytes(_lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
lowerCamelCase__ : Optional[Any] = flatten_dict(jax.tree_util.tree_map(lambda _lowerCamelCase : x.dtype == jnp.bfloataa , _lowerCamelCase ) ).values()
if any(_lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
lowerCamelCase__ : Union[str, Any] = jax.tree_util.tree_map(
lambda _lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowerCamelCase )
lowerCamelCase__ : str = flatten_dict(_lowerCamelCase )
lowerCamelCase__ : List[str] = pt_model.state_dict()
lowerCamelCase__ : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase__ : str = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase__ : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase__ : Union[str, Any] = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__ : str = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__ : Dict = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCamelCase ) not in pt_model_dict:
# conv layer
lowerCamelCase__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : List[Any] = jnp.transpose(_lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ) not in pt_model_dict:
# linear layer
lowerCamelCase__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase__ : int = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
lowerCamelCase__ : Optional[Any] = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase__ : Tuple = '.'.join(_lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase__ : List[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase__ : int = key.split('.' )
lowerCamelCase__ : Any = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase__ : List[str] = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase__ : List[str] = key_components[-2] + '_v'
if name is not None:
lowerCamelCase__ : str = key_components[:-3] + [name]
lowerCamelCase__ : Tuple = '.'.join(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = key
if flax_key in special_pt_names:
lowerCamelCase__ : Dict = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase__ : Union[str, Any] = np.asarray(_lowerCamelCase ) if not isinstance(_lowerCamelCase , np.ndarray ) else flax_tensor
lowerCamelCase__ : List[Any] = torch.from_numpy(_lowerCamelCase )
# remove from missing keys
missing_keys.remove(_lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCamelCase )
pt_model.load_state_dict(_lowerCamelCase )
# re-transform missing_keys to list
lowerCamelCase__ : List[Any] = list(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(_lowerCamelCase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
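# --- illustrative addition ---
# Hedged round-trip sketch of the two public entry points defined above; the
# names follow the upstream transformers API that this module mirrors:
#   flax_state = load_pytorch_checkpoint_in_flax_state_dict(flax_model, "pytorch_model.bin", is_sharded=False)
#   pt_model   = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")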
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
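The slow test above converts a PIL depth hint into the NCHW float tensor the controlnet pipeline consumes. A minimal sketch of that preprocessing, assuming any RGB PIL image as input (the helper name is made up for illustration):
import numpy as np
import torch
from PIL import Image

def pil_to_hint(img: Image.Image) -> torch.Tensor:
    # HWC uint8 in [0, 255] -> HWC float in [0, 1]
    arr = torch.from_numpy(np.array(img)).float() / 255.0
    # HWC -> CHW, then add a batch dimension: (1, C, H, W)
    return arr.permute(2, 0, 1).unsqueeze(0)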
| 696 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = StableDiffusionXLImgaImgPipeline
lowerCamelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase__ : Any = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=lowerCamelCase_, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=8_0, cross_attention_dim=6_4, )
lowerCamelCase__ : Optional[int] = EulerDiscreteScheduler(
beta_start=0.00_085, beta_end=0.012, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=1_2_8, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act='gelu', projection_dim=3_2, )
lowerCamelCase__ : Optional[Any] = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = CLIPTextModelWithProjection(lowerCamelCase_ )
lowerCamelCase__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : str = image / 2 + 0.5
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : str = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Optional[int] = self.get_dummy_components()
lowerCamelCase__ : Optional[Any] = StableDiffusionXLImgaImgPipeline(**lowerCamelCase_ )
lowerCamelCase__ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : Tuple = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ (self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_dummy_components()
lowerCamelCase__ : Tuple = StableDiffusionXLImgaImgPipeline(**lowerCamelCase_ )
lowerCamelCase__ : List[str] = sd_pipe.to(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
# forward without prompt embeds
lowerCamelCase__ : Tuple = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = 3 * ['this is a negative prompt']
lowerCamelCase__ : Optional[Any] = negative_prompt
lowerCamelCase__ : Any = 3 * [inputs['prompt']]
lowerCamelCase__ : int = sd_pipe(**lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase__ : List[Any] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Any = 3 * ['this is a negative prompt']
lowerCamelCase__ : Any = 3 * [inputs.pop('prompt' )]
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = sd_pipe.encode_prompt(lowerCamelCase_, negative_prompt=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = sd_pipe(
**lowerCamelCase_, prompt_embeds=lowerCamelCase_, negative_prompt_embeds=lowerCamelCase_, pooled_prompt_embeds=lowerCamelCase_, negative_pooled_prompt_embeds=lowerCamelCase_, )
lowerCamelCase__ : Any = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self, lowerCamelCase_, lowerCamelCase_="cpu", lowerCamelCase_=torch.floataa, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 4, 6_4, 6_4) )
lowerCamelCase__ : Tuple = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_, dtype=lowerCamelCase_ )
lowerCamelCase__ : Dict = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Optional[int] = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
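get_inputs above seeds the initial latents from NumPy so the slow test is reproducible across runs. A minimal sketch of that determinism, with the shape chosen here purely for illustration:
import numpy as np
import torch

lat_a = np.random.RandomState(0).standard_normal((1, 4, 64, 64))
lat_b = np.random.RandomState(0).standard_normal((1, 4, 64, 64))
assert np.array_equal(lat_a, lat_b)  # same seed -> identical starting noise
latents = torch.from_numpy(lat_a).to(dtype=torch.float32)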
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
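A table like this is typically consulted to verify that an installed package satisfies its pinned specifier. A minimal sketch of such a check using the packaging library; check_dep is an illustrative helper, not the actual function that reads this table:
from importlib.metadata import version
from packaging.requirements import Requirement

def check_dep(spec: str) -> None:
    req = Requirement(spec)        # e.g. "torch>=1.9,!=1.12.0"
    installed = version(req.name)  # raises PackageNotFoundError if absent
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name} {installed} does not satisfy {spec}")

check_dep("numpy>=1.17")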
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ["ConditionalDetrFeatureExtractor"]
A_ : str = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
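The _LazyModule indirection above defers the heavy imports until an attribute is first accessed. A minimal sketch of the same deferral using a module-level __getattr__ (PEP 562); the mapping below is illustrative, and the hook only fires when this file is imported as a module:
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # resolve the attribute lazily on first access, then cache it in globals()
    if name in _attr_to_module:
        value = getattr(importlib.import_module(_attr_to_module[name]), name)
        globals()[name] = value
        return value
    raise AttributeError(name)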
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
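A minimal sketch of the exchange the class above implements, using the textbook toy parameters p=23, g=5 rather than an RFC 3526 group:
p, g = 23, 5                       # toy parameters; real use needs an RFC 3526 group
alice_priv, bob_priv = 6, 15       # normally drawn from a CSPRNG such as urandom
alice_pub = pow(g, alice_priv, p)  # 5**6 % 23 == 8
bob_pub = pow(g, bob_priv, p)      # 5**15 % 23 == 19
shared_a = pow(bob_pub, alice_priv, p)
shared_b = pow(alice_pub, bob_priv, p)
assert shared_a == shared_b == 2   # both sides derive the same secret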
| 696 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['onnx']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['onnx'] )
@classmethod
def a__ (cls, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(cls, ['onnx'] )
@classmethod
def a__ (cls, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(cls, ['onnx'] )
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
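A quick worked example of the formula above, E = 0.5 * m * v**2; the abs calls make the result independent of the velocity's sign:
mass, velocity = 10.0, -5.0                          # kg, m/s
energy = 0.5 * mass * abs(velocity) * abs(velocity)
assert energy == 125.0                               # joules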
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
A_ : int = "#"
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : dict = {}
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self._trie
for char in text:
if char not in trie:
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : Optional[Any] = trie[char]
lowerCamelCase__ : List[str] = True
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self._trie
for char in prefix:
if char in trie:
lowerCamelCase__ : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = []
for c, v in d.items():
lowerCamelCase__ : List[str] = [' '] if c == END else [(c + s) for s in self._elements(lowerCamelCase_ )]
result.extend(lowerCamelCase_ )
return tuple(lowerCamelCase_ )
A_ : List[Any] = Trie()
A_ : int = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = trie.find_word(_lowerCamelCase )
return tuple(string + word for word in suffixes )
def lowerCamelCase_ ( ):
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
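A self-contained sketch of the same prefix-trie idea with plain nested dicts (END marks a complete word, as above); it is written independently because the class methods above all share one mangled name:
END = "#"

def insert(trie: dict, word: str) -> None:
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    node[END] = True

def words_with_prefix(trie: dict, prefix: str) -> list:
    node = trie
    for ch in prefix:
        if ch not in node:
            return []
        node = node[ch]
    out, stack = [], [(node, prefix)]
    while stack:
        node, acc = stack.pop()
        for key, child in node.items():
            if key == END:
                out.append(acc)
            else:
                stack.append((child, acc + key))
    return out

trie: dict = {}
for w in ("dog", "deer", "deal"):
    insert(trie, w)
assert sorted(words_with_prefix(trie, "de")) == ["deal", "deer"]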
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
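The first test class above exercises update_from_string, which parses a "key=value,key=value" string and coerces each value to the type of the existing attribute. A minimal sketch of that coercion rule, illustrative rather than the transformers implementation:
def update_from_string(obj, update_str: str) -> None:
    for pair in update_str.split(","):
        key, value = pair.split("=", 1)
        old = getattr(obj, key)    # the key must already exist on obj
        if isinstance(old, bool):  # check bool before int: bool is an int subclass
            value = value.lower() in ("true", "1")
        elif isinstance(old, int):
            value = int(value)
        elif isinstance(old, float):
            value = float(value)
        setattr(obj, key, value)

class Cfg:
    n_embd, resid_pdrop, scale_attn_weights = 768, 0.1, True

c = Cfg()
update_from_string(c, "n_embd=769,resid_pdrop=1.1,scale_attn_weights=False")
assert (c.n_embd, c.resid_pdrop, c.scale_attn_weights) == (769, 1.1, False)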
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ : Dict = len(_lowerCamelCase )
    # If row is equal to the size of the board, it means there is a queen in
    # each row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowerCamelCase ):
        # We apply what we learned previously. First we check that the current
        # board (possible_board) does not already contain this column value,
        # because a repeated value means a vertical collision. Then we apply the
        # two diagonal formulas we learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
        # We then verify that the results of these two formulas do not already
        # appear in diagonal_right_collisions and diagonal_left_collisions.
        #
        # If any of these checks is true there is a collision, so we continue to
        # the next column in the loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision, we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_lowerCamelCase )
print('' )
print(len(_lowerCamelCase ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
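The collision test above rests on a simple invariant: queens at (r1, c1) and (r2, c2) share a diagonal exactly when r1 - c1 == r2 - c2 (the 45 degree case) or r1 + c1 == r2 + c2 (the 135 degree case). A small check of that invariant:
def same_diagonal(ra: int, ca: int, rb: int, cb: int) -> bool:
    return ra - ca == rb - cb or ra + ca == rb + cb

assert same_diagonal(0, 0, 3, 3)      # main diagonal
assert same_diagonal(0, 3, 3, 0)      # anti-diagonal
assert not same_diagonal(0, 0, 1, 2)  # a knight's move apart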
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
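The inner loop above is classifier-style guidance: the trajectory is nudged along the gradient of a learned value function, scaled by the scheduler's posterior standard deviation. A minimal sketch of one such update, with a quadratic stand-in for the value network:
import torch

x = torch.randn(4, 16, requires_grad=True)  # a batch of flattened trajectories
value = -(x ** 2).sum()                     # stand-in for value_function(x, t)
(grad,) = torch.autograd.grad(value, x)
model_std = 0.5                             # stand-in for exp(0.5 * posterior_variance)
scale = 0.1
x = x.detach() + scale * model_std * grad   # gradient ascent toward higher value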
| 696 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
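The parameterized test above compares "linear" and "dynamic" RoPE scaling. In the linear variant, positions are simply divided by the factor before the rotary frequencies are formed; a minimal sketch of the frequency computation under that assumption (rope_freqs is an illustrative name):
import torch

def rope_freqs(seq_len: int, dim: int, base: float = 10_000.0, factor: float = 1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / factor  # the linear scaling step
    return torch.outer(positions, inv_freq)             # (seq_len, dim // 2) angles

# scaled position 4 with factor 2 sees the same angles as unscaled position 2
assert torch.equal(rope_freqs(8, 64, factor=2.0)[4], rope_freqs(8, 64)[2])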
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each two-character sequence calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text(text: str ) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
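# --- Worked sketch (illustrative helper, not part of the original module): for the
# text "aab", analyze_text yields single-character counts {'a': 2, 'b': 1}, so the
# first-order Shannon entropy is -(2/3)*log2(2/3) - (1/3)*log2(1/3) ~= 0.918 bits
# per character.
def _entropy_sketch(text: str = "aab" ) -> float:
    counts = Counter(text )
    total = sum(counts.values() )
    return -sum((c / total) * math.log2(c / total ) for c in counts.values() )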
def main():
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
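# --- Usage note (sketch): with the lazy structure above, `from transformers import
# GPTNeoXForCausalLM` resolves through _LazyModule, so the torch-dependent modeling
# module is only imported on first attribute access rather than at package import time.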
| 718 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(os.path.abspath(__file__ ) ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
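# --- Worked sketch (from the Project Euler 22 statement): "COLIN" is worth
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it scores
# 938 * 53 = 49714.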
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
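# --- Usage sketch (hypothetical paths):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json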
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'
    def __init__(self, feature_extractor, tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio', None )
        sampling_rate = kwargs.pop('sampling_rate', None )
        text = kwargs.pop('text', None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode(self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor(self ):
        '''simple docstring'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
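# --- Usage sketch (assumes a saved checkpoint; the waveform variable is illustrative):
# calling the processor with both modalities returns audio features plus tokenized labels.
#   processor = Speech2TextProcessor.from_pretrained('facebook/s2t-small-librispeech-asr')
#   batch = processor(audio=waveform, sampling_rate=16000, text='a transcript')
#   # batch['input_features'] comes from the feature extractor, batch['labels'] from the tokenizer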
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=6_4, embedding_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        '''simple docstring'''
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=3_7 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head(self ):
        '''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 5_1_2) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
                    [-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
                    [2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
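# --- Illustrative sketch (hypothetical helper, not part of the test): the same
# relative-tolerance bound expressed as a reusable check, suited to values that
# span several orders of magnitude (~1e0 to 1e8 here).
def _within_relative_tolerance(expected, actual, tol=1e-3 ):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol ) and torch.all(ratio <= 1 + tol ) )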
| 696 | 0 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    '''simple docstring'''
    @staticmethod
    def _should_log(main_process_only ):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs ):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only', True )
        in_order = kwargs.pop('in_order', False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg, kwargs )
                self.logger.log(level, msg, *args, **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs )
                        self.logger.log(level, msg, *args, **kwargs )
                    state.wait_for_everyone()
def get_logger(name , log_level = None ):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
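# --- Usage sketch (assumes an initialized Accelerator/PartialState):
#   logger = get_logger(__name__, log_level='INFO')
#   logger.info('printed once, on the main process only')
#   logger.info('printed on every process, in rank order', main_process_only=False, in_order=True)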
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset( IterableDataset ):
    '''simple docstring'''
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1 ):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self ):
        '''simple docstring'''
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
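# --- Worked note (illustrative): with n_tasks=2 and n_copies=3 the iterator above
# yields 6 items with task ids [0, 0, 0, 1, 1, 1], so every prompt is generated
# n_copies times and the copies are later regrouped by task id.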
class EndOfFunctionCriteria( StoppingCriteria ):
    '''simple docstring'''
    def __init__(self, start_length, eof_strings, tokenizer ):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs ):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block(string ):
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval' )
    code_eval_metric = load_metric('code_eval' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['test'] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''] , predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception
    model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['test'][task]['test']
            entry_point = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append('\n' + test_func + '\n' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k , _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file , 'w' ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v ):
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function(choices ):
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg )
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types, **kwargs ):
        '''simple docstring'''
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field(parser, field ):
        '''simple docstring'''
        field_name = f'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases', [] )
        if isinstance(aliases, str ):
            aliases = [aliases]
        origin_type = getattr(field.type, '__origin__', field.type )
        if origin_type is Union or (hasattr(types, 'UnionType' ) and isinstance(origin_type, types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '__origin__', field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '__origin__', field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type ) and issubclass(field.type, Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type, list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name, *aliases, **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f'''--no_{field.name}''', action='store_false', dest=field.name, **bool_kwargs )
    def _add_dataclass_arguments(self, dtype ):
        '''simple docstring'''
        if hasattr(dtype, '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str, sys.version_info[:3] ) )
                raise RuntimeError(
                    f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field )
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ):
        '''simple docstring'''
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ), None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace, k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys=False ):
        '''simple docstring'''
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}''' )
        return tuple(outputs )
    def parse_json_file(self, json_file, allow_extra_keys=False ):
        '''simple docstring'''
        with open(Path(json_file ), encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file(self, yaml_file, allow_extra_keys=False ):
        '''simple docstring'''
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ), allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
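# --- Usage sketch (hypothetical dataclass):
#   @dataclasses.dataclass
#   class TrainArgs:
#       lr: float = 1e-3
#       fp16: bool = False
#   parser = HfArgumentParser(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses(args=['--lr', '3e-4', '--fp16'])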
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: class names below are reconstructed (assumption) -- the exact set of dummy
# speech objects varies with the transformers version.
class ASTFeatureExtractor( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['speech']
    def __init__(self, *args, **kwargs ):
        '''simple docstring'''
        requires_backends(self, ['speech'] )
class Speech2TextFeatureExtractor( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['speech']
    def __init__(self, *args, **kwargs ):
        '''simple docstring'''
        requires_backends(self, ['speech'] )
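# --- Usage note (sketch): these placeholders fail lazily. Instantiating one without
# the `speech` extra installed raises an ImportError from requires_backends pointing
# at the missing dependency, instead of failing at `import transformers` time.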
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : str = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(self, vocab_size=5_0_2_5_7, max_position_embeddings=2_0_4_8, hidden_size=2_0_4_8, num_layers=2_4, attention_types=[[["global", "local"], 1_2]], num_heads=1_6, intermediate_size=None, window_size=2_5_6, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=5_0_2_5_6, eos_token_id=5_0_2_5_6, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
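# --- Worked sketch (illustrative): attention_types=[[["global", "local"], 12]]
# expands to the 24-entry layer pattern ["global", "local", "global", "local", ...],
# matching the default num_layers=24.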
def custom_unfold(input , dimension , size , step ):
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
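# --- Worked sketch (illustrative): for a tensor of shape (1, 8), custom_unfold with
# dimension=1, size=4, step=2 gathers the windows [0:4], [2:6], [4:8] and returns a
# tensor of shape (1, 3, 4), mirroring torch.Tensor.unfold.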
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
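# --- Worked sketch (illustrative): for seq_length=12 and window_size=8, the divisor
# candidates are 1..7, those dividing 12 are {1, 2, 3, 4, 6}, so the helper returns
# block_length=6 and num_blocks=12 // 6 == 2.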
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs(self ):
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads(self ):
        '''simple docstring'''
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self ):
        '''simple docstring'''
        return 1_3
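# --- Usage note (sketch): with use_past=True the exported graph gains one
# (key, value) pair of shape (batch, num_heads, past_seq_len, head_dim) per layer,
# and the attention mask is widened to cover past_sequence + sequence.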
| 701 |
"""simple docstring"""
def factorial(num ):
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add(number ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num = 100 ):
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
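# --- Worked sketch: for num = 10, factorial(10) == 3628800 and
# split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.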
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A_ : Union[str, Any] = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'maskformer'
lowerCamelCase__ : Tuple = {'hidden_size': 'mask_feature_size'}
lowerCamelCase__ : List[Any] = ['resnet', 'swin']
lowerCamelCase__ : Union[str, Any] = ['detr']
def __init__(self, lowerCamelCase_ = 2_5_6, lowerCamelCase_ = 2_5_6, lowerCamelCase_ = 0.1, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = 0.02, lowerCamelCase_ = 1.0, lowerCamelCase_ = 1.0, lowerCamelCase_ = 1.0, lowerCamelCase_ = 20.0, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCamelCase__ : Optional[int] = SwinConfig(
image_size=3_8_4, in_channels=3, patch_size=4, embed_dim=1_2_8, depths=[2, 2, 1_8, 2], num_heads=[4, 8, 1_6, 3_2], window_size=1_2, drop_path_rate=0.3, out_features=['stage1', 'stage2', 'stage3', 'stage4'], )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[str] = backbone_config.pop('model_type' )
lowerCamelCase__ : Tuple = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : int = config_class.from_dict(lowerCamelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCamelCase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowerCamelCase__ : Union[str, Any] = (
decoder_config.pop('model_type' ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Tuple = CONFIG_MAPPING[decoder_type]
lowerCamelCase__ : int = config_class.from_dict(lowerCamelCase_ )
lowerCamelCase__ : int = backbone_config
lowerCamelCase__ : List[str] = decoder_config
# main feature dimension for the model
lowerCamelCase__ : Optional[Any] = fpn_feature_size
lowerCamelCase__ : int = mask_feature_size
# initializer
lowerCamelCase__ : Optional[Any] = init_std
lowerCamelCase__ : Union[str, Any] = init_xavier_std
# Hungarian matcher && loss
lowerCamelCase__ : Tuple = cross_entropy_weight
lowerCamelCase__ : Union[str, Any] = dice_weight
lowerCamelCase__ : str = mask_weight
lowerCamelCase__ : int = use_auxiliary_loss
lowerCamelCase__ : List[Any] = no_object_weight
lowerCamelCase__ : int = output_auxiliary_logits
lowerCamelCase__ : Any = self.decoder_config.encoder_attention_heads
lowerCamelCase__ : int = self.decoder_config.num_hidden_layers
super().__init__(**lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase_, decoder_config=lowerCamelCase_, **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : str = self.backbone_config.to_dict()
lowerCamelCase__ : Optional[Any] = self.decoder_config.to_dict()
lowerCamelCase__ : Any = self.__class__.model_type
return output
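# --- Editor's usage sketch (addition, assumption-flagged) ---
# A minimal example of how a MaskFormer-style config is typically composed and
# serialized. The names `MaskFormerConfig` / `from_backbone_and_decoder_configs`
# follow the upstream `transformers` API and are assumptions relative to the
# obfuscated class above; adjust them to your local names.
#
# from transformers import MaskFormerConfig, SwinConfig
#
# backbone = SwinConfig(image_size=384, embed_dim=128, depths=[2, 2, 18, 2])
# config = MaskFormerConfig.from_backbone_and_decoder_configs(
#     backbone_config=backbone, decoder_config=None  # decoder falls back to DetrConfig()
# )
# config_dict = config.to_dict()  # nested backbone/decoder configs serialize recursively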
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
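# --- Editor's sketch (addition, assumption-flagged) ---
# The expected ids in the fixtures above are consistent with a byte-level
# scheme in which each UTF-8 byte is shifted by 6 to leave room for special
# tokens ([CLS]=4, [SEP]=5). The helper below is a hypothetical illustration
# derived from the fixtures, not part of the tokenizer's public API.
def _toy_byte_encode(text):
    # [CLS], then each UTF-8 byte offset by 6, then [SEP]
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]

# Matches the 'Unicode €.' fixture used in the test above.
assert _toy_byte_encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]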
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = KandinskyVaaInpaintPipeline
lowerCamelCase__ : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowerCamelCase__ : List[Any] = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowerCamelCase__ : Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : int = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : str = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.dummy_unet
lowerCamelCase__ : int = self.dummy_movq
lowerCamelCase__ : Any = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.00_085, beta_end=0.012, clip_sample=lowerCamelCase_, set_alpha_to_one=lowerCamelCase_, steps_offset=1, prediction_type='epsilon', thresholding=lowerCamelCase_, )
lowerCamelCase__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : str = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
lowerCamelCase__ : Tuple = np.ones((6_4, 6_4), dtype=np.floataa )
lowerCamelCase__ : List[Any] = 0
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : List[Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : List[str] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 'cpu'
lowerCamelCase__ : int = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCamelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : int = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def a__ (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
lowerCamelCase__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = 'a hat'
lowerCamelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : int = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint', torch_dtype=torch.floataa )
lowerCamelCase__ : List[Any] = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ : int = pipe_prior(
lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=5, negative_prompt='', ).to_tuple()
lowerCamelCase__ : List[str] = pipeline(
image=lowerCamelCase_, mask_image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
lowerCamelCase__ : Tuple = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
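# Worked example (editor's addition): for radius 3 and height 4 the slant
# height is sqrt(3**2 + 4**2) = 5, so surface_area_cone(3, 4) returns
# pi * 3 * (3 + 5) = 24 * pi ≈ 75.40.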
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
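# Worked example (editor's addition): surface_area_torus(20, 10) evaluates
# 4 * pi**2 * 20 * 10 = 800 * pi**2 ≈ 7895.68, i.e. the product of the two
# generating circumferences (2*pi*20) * (2*pi*10).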
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
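# Worked example (editor's addition): for the 3-4-5 right triangle the
# semi-perimeter is s = 6, so Heron's formula gives
# sqrt(6 * (6-3) * (6-4) * (6-5)) = sqrt(36) = 6.0, matching (3 * 4) / 2.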
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
elif length < 0:
raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
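# Worked example (editor's addition): a square is the regular 4-gon, so
# area_reg_polygon(4, 10) = (4 * 10**2) / (4 * tan(pi / 4)) = 100, matching
# area_square(10).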
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
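# --- Editor's sketch (addition, assumption-flagged) ---
# A table like the one above is typically consumed by setup tooling that looks
# pinned specifiers up by bare package name. A minimal helper might look like
# this (hypothetical; the real build script may differ):
def deps_list(*pkgs):
    """Map bare package names to their pinned requirement strings."""
    return [A_[pkg] for pkg in pkgs]

# e.g. deps_list("torch", "tqdm") -> ["torch>=1.9,!=1.12.0", "tqdm>=4.27"]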
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append the new tokens to next input_ids and the attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
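# --- Editor's sketch (addition, assumption-flagged) ---
# The parameterized test above exercises RoPE scaling. In user code this is
# typically enabled through the config's `rope_scaling` dict; the keys mirror
# the test's {"type": ..., "factor": ...} payload but should be treated as an
# assumption about the API, not a guarantee:
#
# from transformers import GPTNeoXConfig, GPTNeoXModel
#
# config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 10.0})
# model = GPTNeoXModel(config)  # positions beyond the trained length are rescaled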
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
A_ : Union[str, Any] = {"UserAgent": UserAgent().random}
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = script.contents[0]
lowerCamelCase__ : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = f'''https://www.instagram.com/{username}/'''
lowerCamelCase__ : Dict = self.get_json()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = requests.get(self.url, headers=lowerCamelCase_ ).text
lowerCamelCase__ : str = BeautifulSoup(lowerCamelCase_, 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
'''simple docstring'''
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__(self ):
'''simple docstring'''
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["username"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase_ ( _lowerCamelCase = "github" ):
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCamelCase__ : Dict = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : List[Any] = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
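# For instance (editor's addition), on the example link above the pattern
# captures both the checkpoint name and its URL:
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]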
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = 'timm_backbone'
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : str = backbone
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : int = features_only
lowerCamelCase__ : Union[str, Any] = use_pretrained_backbone
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : List[Any] = out_indices if out_indices is not None else (-1,)
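# --- Editor's usage sketch (addition, assumption-flagged) ---
# Assuming the upstream name `TimmBackboneConfig`, a config selecting a timm
# ResNet and exposing only the final feature map might look like:
#
# from transformers import TimmBackboneConfig
#
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(-1,))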
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'transfo-xl'
lowerCamelCase__ : str = ['mems']
lowerCamelCase__ : int = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self, lowerCamelCase_=2_6_7_7_3_5, lowerCamelCase_=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0], lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_6, lowerCamelCase_=6_4, lowerCamelCase_=4_0_9_6, lowerCamelCase_=4, lowerCamelCase_=False, lowerCamelCase_=1_8, lowerCamelCase_=1_6_0_0, lowerCamelCase_=1_0_0_0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=0, lowerCamelCase_=-1, lowerCamelCase_=True, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_="normal", lowerCamelCase_=0.01, lowerCamelCase_=0.01, lowerCamelCase_=0.02, lowerCamelCase_=1e-5, lowerCamelCase_=0, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Dict = []
self.cutoffs.extend(lowerCamelCase_ )
if proj_share_all_but_first:
lowerCamelCase__ : Dict = [False] + [True] * len(self.cutoffs )
else:
lowerCamelCase__ : Tuple = [False] + [False] * len(self.cutoffs )
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[Any] = d_embed
lowerCamelCase__ : Optional[Any] = d_head
lowerCamelCase__ : Optional[int] = d_inner
lowerCamelCase__ : Optional[Any] = div_val
lowerCamelCase__ : List[str] = pre_lnorm
lowerCamelCase__ : List[Any] = n_layer
lowerCamelCase__ : Tuple = n_head
lowerCamelCase__ : Union[str, Any] = mem_len
lowerCamelCase__ : str = same_length
lowerCamelCase__ : Tuple = attn_type
lowerCamelCase__ : Union[str, Any] = clamp_len
lowerCamelCase__ : List[Any] = sample_softmax
lowerCamelCase__ : List[Any] = adaptive
lowerCamelCase__ : List[Any] = dropout
lowerCamelCase__ : int = dropatt
lowerCamelCase__ : Dict = untie_r
lowerCamelCase__ : Tuple = init
lowerCamelCase__ : Union[str, Any] = init_range
lowerCamelCase__ : Union[str, Any] = proj_init_std
lowerCamelCase__ : Union[str, Any] = init_std
lowerCamelCase__ : str = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
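# --- Editor's note (addition) ---
# The defaults above describe an adaptive-softmax vocabulary: the cutoffs
# [20000, 40000, 200000] split the 267735-token vocab into 4 frequency
# clusters, and `div_val=4` shrinks the embedding width by 4x per cluster
# (1024, 256, 64, 16) so rare tokens cost far fewer parameters.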
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['image_processor', 'tokenizer']
lowerCamelCase__ : Dict = 'AutoImageProcessor'
lowerCamelCase__ : List[str] = 'AutoTokenizer'
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : List[str] = kwargs.pop('feature_extractor' )
lowerCamelCase__ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.image_processor
lowerCamelCase__ : Union[str, Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : List[Any] = kwargs.pop('images', lowerCamelCase_ )
lowerCamelCase__ : Any = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : Any = args[0]
lowerCamelCase__ : Tuple = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowerCamelCase__ : Optional[Any] = self.image_processor(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase__ : Dict = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : Union[str, Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.image_processor
lowerCamelCase__ : str = False
    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None ):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r'<s_(.*?)>', tokens, re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf'''</s_{key}>''', tokens, re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, '' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', tokens, re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'<sep/>' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=is_inner_value, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCamelCase_, )
return self.image_processor_class
@property
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCamelCase_, )
return self.image_processor
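# Illustration of the tag-to-JSON conversion above (the processor instances are
# hypothetical; the expected output follows directly from the parsing logic in
# `tokenajson`):
#
#   processor = a_(image_processor=my_image_processor, tokenizer=my_tokenizer)
#   processor.tokenajson("<s_menu><s_name>latte</s_name></s_menu>")
#   # -> {"menu": {"name": "latte"}}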
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname )[0], 'snapshots' ) )]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 4.1_514_745 ) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32 ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.05_652_401) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16 )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, steps_offset=1, )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.045_043_945) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
        images_eff = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
        assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice_eff = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 0 |
"""simple docstring"""
def is_palindrome( num ):
    return str(num ) == str(num )[::-1]
def sum_reverse( num ):
    return int(num ) + int(str(num )[::-1] )
def solution( limit = 1_0000 ):
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(f"{solution() = }")
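# Hand-checked example of the iteration above: 47 + 74 = 121, a palindrome, so
# 47 drops out after a single step; 196 is the classic candidate that never
# reaches a palindrome within the 50-iteration cutoff used here.
#   assert is_palindrome(sum_reverse(47)) and not is_palindrome(47)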
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__(self, unet, scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
    @torch.no_grad()
    def __call__(self, batch_size = 1, num_inference_steps = 1_0_0, generator = None, audio_length_in_s = None, return_dict = True, ):
        '''simple docstring'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio, t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio ).prev_sample
        audio = audio.clamp(-1, 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
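# Minimal usage sketch (the checkpoint name is an assumption for illustration;
# the call signature matches the __call__ defined above):
#
#   pipe = a_.from_pretrained("harmonai/maestro-150k")
#   output = pipe(batch_size=1, num_inference_steps=1_0_0, audio_length_in_s=4.0)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)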
| 696 | 0 |
"""simple docstring"""
def abbr( a , b ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
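# Worked example for the DP above (hand-traced): lowercase letters may be
# capitalized or deleted, so "daBcd" can become "ABC" but "dBcd" cannot.
#   assert abbr("daBcd", "ABC") is True
#   assert abbr("dBcd", "ABC") is False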
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars , output_vars , shapes , tokens = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
        # Assert all variables are present
        self.assertEqual(len(shapes ), len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3], input_vars )
        self.assertSequenceEqual(variable_names[3:], output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names , model_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(model_args ), 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ), set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(model_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , model_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names ), 1 )
        self.assertEqual(len(model_args ), 1 )
        # Should have only "input_ids"
        self.assertEqual(model_args[0], tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
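# Standalone sketch of the export path these tests drive (the output path is
# hypothetical; `convert` is the helper imported at the top of this file):
#
#   from pathlib import Path
#   convert(framework="pt", model="bert-base-cased", output=Path("onnx/bert.onnx"), opset=1_2)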
| 696 | 0 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def _get_tensors(self, length ):
        '''simple docstring'''
        batch_size = 3
        vocab_size = 2_5_0
        input_ids = ids_tensor((batch_size, length), vocab_size )
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float ) / length
        return input_ids, scores
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._get_tensors(5 )
lowerCamelCase__ : str = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Dict = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = MaxLengthCriteria(max_length=1_0 )
lowerCamelCase__ : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = MaxNewTokensCriteria(start_length=5, max_new_tokens=5 )
lowerCamelCase__ : Union[str, Any] = self._get_tensors(5 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : int = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Any = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length, 1_0 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self._get_tensors(5 )
lowerCamelCase__ : Tuple = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_0 )
with self.assertWarns(lowerCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_1 )
lowerCamelCase__ : Union[str, Any] = validate_stopping_criteria(StoppingCriteriaList(), 1_1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
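# Sketch of how these criteria are consumed in practice (model and inputs are
# hypothetical; `stopping_criteria` is a real argument of `generate`):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=2_0)])
#   model.generate(input_ids, stopping_criteria=criteria)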
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
        init_image = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
pipe_prior.to(lowerCamelCase_ )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16 )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
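# The two-stage flow exercised above, in brief: the prior maps the text prompt
# (plus an optional init image) to image embeddings, which the controlnet
# decoder then denoises under the depth hint. Checkpoint names are the ones
# used in the test; everything else is a condensed sketch of the calls above.
#
#   image_emb, zero_image_emb = pipe_prior(prompt, image=init_image, strength=0.85).to_tuple()
#   out = pipeline(image=init_image, image_embeds=image_emb,
#                  negative_image_embeds=zero_image_emb, hint=hint, output_type='np')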
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : List[Any] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class a_ ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ : Any = 'roberta-prelayernorm'
    def __init__(self, vocab_size=5_0_2_6_5, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a_ ( OnnxConfig ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
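# Instantiation sketch for the configuration defined above. Note that both
# classes in this file share the alias `a_`, so bind the config class before
# the second definition shadows it:
#
#   config = a_(vocab_size=5_0_2_6_5, num_hidden_layers=1_2)   # the PretrainedConfig subclass
#   config.model_type  # -> 'roberta-prelayernorm'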
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
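# Helper sketch showing how such a pin table is typically consumed in a
# setup.py (the function name `deps_list` is an assumption, not defined here):
def deps_list(*pkgs):
    return [A_[pkg] for pkg in pkgs]
# e.g. deps_list("numpy", "tqdm") -> ["numpy>=1.17", "tqdm>=4.27"]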
| 696 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq , size ):
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty ):
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ''
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key ):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext , key ):
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext , key ):
    table = generate_table(key )
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
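# Worked example, hand-checked against the 5x5 table that "monarchy" generates
# (the trailing X comes from the odd-length padding in prepare_input):
assert encode("instruments", "monarchy") == "GATLMZCLRQXA"
assert decode("GATLMZCLRQXA", "monarchy") == "INSTRUMENTSX"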
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''
    def __init__(self, group = 1_4 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(3_2 ) ), base=1_6 )
    def get_private_key(self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key(self ):
        '''simple docstring'''
        public_key = pow(self.generator, self.__private_key, self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self, key ):
        '''simple docstring'''
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
        )
    def generate_shared_key(self, other_key_str ):
        '''simple docstring'''
        other_key = int(other_key_str, base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str, prime ):
        '''simple docstring'''
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str, remote_public_key_str, group = 1_4 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str, base=1_6 )
        remote_public_key = int(remote_public_key_str, base=1_6 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
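# Round-trip sketch (keys are freshly random each run, so only equality of the
# two derived secrets can be asserted):
#
#   alice, bob = DiffieHellman(group=1_4), DiffieHellman(group=1_4)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b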
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( snake_case_ ):
'''simple docstring'''
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=6_4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = SqueezeBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SqueezeBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Any = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = SqueezeBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : Union[str, Any] = SqueezeBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Dict = SqueezeBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_choices
lowerCamelCase__ : Tuple = SqueezeBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Any = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
        (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : Any = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[int] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = SqueezeBertModelTester(self )
lowerCamelCase__ : int = ConfigTester(self, config_class=lowerCamelCase_, dim=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = SqueezeBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCamelCase__ : int = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Dict = torch.Size((1, 3) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Dict = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-4 ) )
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
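# Editor's note (hedged, not part of the original source): the function above
# implements translational kinetic energy, KE = 0.5 * m * v**2, using the
# absolute value of the velocity so the sign of v does not matter. A worked
# example under that reading:
#
#   mass, velocity = 10, -10
#   0.5 * mass * abs(velocity) * abs(velocity)  # == 500.0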
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 | 0 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
A_ : List[str] = getLogger(__name__)
A_ : str = "cuda" if torch.cuda.is_available() else "cpu"
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 8 , _lowerCamelCase = DEFAULT_DEVICE , _lowerCamelCase=False , _lowerCamelCase="summarization" , _lowerCamelCase=None , **_lowerCamelCase , ):
lowerCamelCase__ : str = Path(_lowerCamelCase ).open('w' , encoding='utf-8' )
lowerCamelCase__ : Any = str(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
if fpaa:
lowerCamelCase__ : Optional[Any] = model.half()
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(_lowerCamelCase )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
lowerCamelCase__ : List[Any] = time.time()
# update config with task specific params
use_task_specific_params(_lowerCamelCase , _lowerCamelCase )
if prefix is None:
lowerCamelCase__ : Any = prefix or getattr(model.config , 'prefix' , '' ) or ''
for examples_chunk in tqdm(list(chunks(_lowerCamelCase , _lowerCamelCase ) ) ):
lowerCamelCase__ : Optional[int] = [prefix + text for text in examples_chunk]
lowerCamelCase__ : str = tokenizer(_lowerCamelCase , return_tensors='pt' , truncation=_lowerCamelCase , padding='longest' ).to(_lowerCamelCase )
lowerCamelCase__ : Dict = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCamelCase , )
lowerCamelCase__ : str = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
lowerCamelCase__ : Union[str, Any] = int(time.time() - start_time ) # seconds
lowerCamelCase__ : Any = len(_lowerCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowerCamelCase_ ( ):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def lowerCamelCase_ ( _lowerCamelCase=True ):
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('model_name' , type=_lowerCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=_lowerCamelCase , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=_lowerCamelCase , help='where to save summaries' )
parser.add_argument('--reference_path' , type=_lowerCamelCase , required=_lowerCamelCase , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=_lowerCamelCase , required=_lowerCamelCase , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=_lowerCamelCase , required=_lowerCamelCase , default=_lowerCamelCase , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
'--prefix' , type=_lowerCamelCase , required=_lowerCamelCase , default=_lowerCamelCase , help='will be added to the begininng of src examples' )
parser.add_argument('--task' , type=_lowerCamelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=_lowerCamelCase , default=8 , required=_lowerCamelCase , help='batch size' )
parser.add_argument(
'--n_obs' , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=_lowerCamelCase , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    lowerCamelCase__ , lowerCamelCase__ : Tuple = parser.parse_known_args()
lowerCamelCase__ : Tuple = parse_numeric_n_bool_cl_kwargs(_lowerCamelCase )
if parsed_args and verbose:
print(f'''parsed the following generate kwargs: {parsed_args}''' )
lowerCamelCase__ : Any = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowerCamelCase__ : Optional[int] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
lowerCamelCase__ : Any = generate_summaries_or_translations(
_lowerCamelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCamelCase , )
if args.reference_path is None:
return {}
# Compute scores
lowerCamelCase__ : List[str] = calculate_bleu if 'translation' in args.task else calculate_rouge
lowerCamelCase__ : Optional[Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
lowerCamelCase__ : int = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCamelCase )]
lowerCamelCase__ : dict = score_fn(_lowerCamelCase , _lowerCamelCase )
scores.update(_lowerCamelCase )
if args.dump_args:
scores.update(_lowerCamelCase )
if args.info:
lowerCamelCase__ : List[str] = args.info
if verbose:
print(_lowerCamelCase )
if args.score_path is not None:
json.dump(_lowerCamelCase , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
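    # Editor's hedged note: `update_from_string` is assumed to parse
    # comma-separated "key=value" pairs and cast each value to the type of the
    # existing attribute, so booleans can be written as "true"/"false" and
    # numbers as plain literals, e.g.:
    #
    #   c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")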
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
            configuration.hidden_size = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Test an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
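# Editor's hedged sketch: `normalize`/`de_normalize` above implement a per-key
# z-score transform over the offline dataset statistics. A minimal standalone
# analogue (illustrative only):
#
#   import numpy as np
#   x = np.array([1.0, 2.0, 3.0])
#   mean, std = x.mean(), x.std()
#   z = (x - mean) / std                     # normalize
#   assert np.allclose(z * std + mean, x)    # de_normalize round-trips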
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = "▁"
A_ : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
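# Editor's note (hedged): "▁" (U+2581, LOWER ONE EIGHTH BLOCK) is
# SentencePiece's word-boundary marker, so an expected tokenization such as
# "▁This ▁is" below corresponds to the surface text "This is".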
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = BigBirdTokenizer
lowerCamelCase__ : str = BigBirdTokenizerFast
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Optional[Any] = True
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : List[Any] = self.tokenizer_class(lowerCamelCase_, keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = '<s>'
lowerCamelCase__ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ), lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<unk>' )
self.assertEqual(vocab_keys[1], '<s>' )
self.assertEqual(vocab_keys[-1], '[MASK]' )
self.assertEqual(len(lowerCamelCase_ ), 1_0_0_4 )
def a__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_0 )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : Dict = self.get_tokenizer()
lowerCamelCase__ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase__ : Union[str, Any] = 'I was born in 92000, and this is falsé.'
lowerCamelCase__ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
lowerCamelCase__ : str = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = self.get_rust_tokenizer()
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Any = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BigBirdTokenizer(lowerCamelCase_, keep_accents=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase_, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2], )
lowerCamelCase__ : str = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase_, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
lowerCamelCase__ : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4], )
lowerCamelCase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
@cached_property
def a__ (self ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'Hello World!'
lowerCamelCase__ : Any = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(lowerCamelCase_, self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
lowerCamelCase__ : Dict = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase_, self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCamelCase__ : Any = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase__ : Tuple = ' '.join(lowerCamelCase_ )
lowerCamelCase__ : Any = self.big_tokenizer.encode_plus(lowerCamelCase_, return_tensors='pt', return_token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : int = BigBirdConfig(attention_type='original_full' )
lowerCamelCase__ : str = BigBirdModel(lowerCamelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
lowerCamelCase__ : Dict = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = {'input_ids': [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name='google/bigbird-roberta-base', revision='215c99f1600e06f83acce68422f2035b2b5c3510', )
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total count of single characters, used to turn counts into probabilities.
    lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
    # single-character (first-order) entropy
    lowerCamelCase__ : str = 0
    # for each character of the alphabet, accumulate its entropy contribution if it occurs in the text
    for ch in my_alphas:
        if ch in single_char_strings:
            lowerCamelCase__ : Tuple = single_char_strings[ch]
            lowerCamelCase__ : Union[str, Any] = my_str / all_sum
            my_fir_sum += prob * math.loga(prob ) # entropy formula: H = -sum(p * log2(p))
    # print the first-order entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two-character (second-order) entropy
    lowerCamelCase__ : Dict = sum(two_char_strings.values() )
    lowerCamelCase__ : str = 0
    # for each two-character sequence over the alphabet, accumulate its entropy contribution
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
            if sequence in two_char_strings:
                lowerCamelCase__ : int = two_char_strings[sequence]
                lowerCamelCase__ : Tuple = int(two_char_strings[sequence] ) / all_sum
                my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # seed the two-character counts with a space preceding the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
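# Editor's hedged worked example: the entropies printed above follow
# H = -sum(p * log2(p)) over the observed frequencies. For the two-character
# text "ab", p('a') = p('b') = 0.5, so:
#
#   import math
#   -sum(p * math.log2(p) for p in (0.5, 0.5))  # == 1.0 bit per character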
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Any = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__ : int = 192
lowerCamelCase__ : Tuple = 768
lowerCamelCase__ : Any = 12
lowerCamelCase__ : Tuple = 3
lowerCamelCase__ : int = [800, 1333]
lowerCamelCase__ : Dict = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : Union[str, Any] = 330
lowerCamelCase__ : int = 14
lowerCamelCase__ : List[str] = 6
lowerCamelCase__ : List[Any] = 1320
elif "yolos_s" in yolos_name:
lowerCamelCase__ : Any = 384
lowerCamelCase__ : List[Any] = 1536
lowerCamelCase__ : Any = 12
lowerCamelCase__ : Optional[int] = 6
elif "yolos_b" in yolos_name:
lowerCamelCase__ : Optional[Any] = [800, 1344]
lowerCamelCase__ : Any = 91
lowerCamelCase__ : Dict = 'huggingface/label-files'
lowerCamelCase__ : Any = 'coco-detection-id2label.json'
lowerCamelCase__ : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ : str = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Optional[Any] = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__ : int = in_proj_bias[-config.hidden_size :]
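# Editor's hedged sketch: the slicing above splits timm's fused qkv projection,
# of shape (3 * hidden_size, hidden_size), into query/key/value blocks in that
# order. A minimal numpy analogue (illustrative only):
#
#   import numpy as np
#   hidden = 4
#   qkv = np.zeros((3 * hidden, hidden))
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]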
def lowerCamelCase_ ( _lowerCamelCase ):
if "backbone" in name:
lowerCamelCase__ : str = name.replace('backbone' , 'vit' )
if "cls_token" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
lowerCamelCase__ : str = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
lowerCamelCase__ : str = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
lowerCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase__ : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
lowerCamelCase__ : List[str] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowerCamelCase__ : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : Optional[Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : Tuple = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
lowerCamelCase__ : List[str] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
lowerCamelCase__ : Tuple = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
lowerCamelCase__ : Dict = name.replace('vit.norm' , 'vit.layernorm' )
return name
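# Editor's worked example (derived from the replacements above):
#   "backbone.blocks.0.attn.proj.weight"
#   -> "vit.encoder.layer.0.attention.output.dense.weight"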
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : str = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
lowerCamelCase__ : List[Any] = key.split('.' )
lowerCamelCase__ : int = int(key_split[2] )
lowerCamelCase__ : List[Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__ : str = val[:dim, :]
lowerCamelCase__ : str = val[
dim : dim * 2, :
]
lowerCamelCase__ : List[Any] = val[-dim:, :]
else:
lowerCamelCase__ : List[str] = val[:dim]
lowerCamelCase__ : str = val[dim : dim * 2]
lowerCamelCase__ : Dict = val[-dim:]
else:
lowerCamelCase__ : int = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Any = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
lowerCamelCase__ : Optional[int] = get_yolos_config(_lowerCamelCase )
# load original state_dict
lowerCamelCase__ : Any = torch.load(_lowerCamelCase , map_location='cpu' )['model']
# load 🤗 model
lowerCamelCase__ : List[str] = YolosForObjectDetection(_lowerCamelCase )
model.eval()
lowerCamelCase__ : List[Any] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__ : Union[str, Any] = 800 if yolos_name != 'yolos_ti' else 512
lowerCamelCase__ : int = YolosImageProcessor(format='coco_detection' , size=_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCamelCase__ : Tuple = model(**_lowerCamelCase )
    lowerCamelCase__ , lowerCamelCase__ : Optional[int] = outputs.logits, outputs.pred_boxes
    lowerCamelCase__ , lowerCamelCase__ : Dict = None, None
if yolos_name == "yolos_ti":
lowerCamelCase__ : str = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
lowerCamelCase__ : str = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
lowerCamelCase__ : Any = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__ : Any = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : List[str] = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
lowerCamelCase__ : Optional[int] = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
lowerCamelCase__ : List[str] = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
lowerCamelCase__ : Dict = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
lowerCamelCase__ : Dict = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
lowerCamelCase__ : List[str] = model_mapping[yolos_name]
image_processor.push_to_hub(_lowerCamelCase , organization='hustvl' )
model.push_to_hub(_lowerCamelCase , organization='hustvl' )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ : Dict = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
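# Editor's note (hedged, from the well-known problem statement this solves):
# COLIN has an alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, and as the
# 938th name in the sorted list it contributes 938 * 53 = 49714 to the total.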
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_0_2_4, lowerCamelCase_=3.6 ):
'''simple docstring'''
lowerCamelCase__ : str = tokenizer
lowerCamelCase__ : Any = tokenizer.bos_token_id
lowerCamelCase__ : Dict = dataset
lowerCamelCase__ : Dict = seq_length
lowerCamelCase__ : int = seq_length * chars_per_token * num_of_sequences
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = iter(self.dataset )
lowerCamelCase__ : int = True
while more_examples:
            lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase_ )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowerCamelCase__ : Union[str, Any] = False
break
lowerCamelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_, truncation=lowerCamelCase_ )['input_ids']
lowerCamelCase__ : Dict = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0, len(lowerCamelCase_ ), self.seq_length ):
lowerCamelCase__ : List[Any] = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase_ ) == self.seq_length:
yield torch.tensor(lowerCamelCase_ )
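# Editor's hedged sketch of the packing performed above: tokenized documents
# are joined with the BOS token as separator and re-cut into fixed-length
# windows, dropping any short tail. A minimal standalone analogue:
#
#   bos, seq_length = 0, 3
#   docs = [[1, 2, 3], [4, 5]]
#   stream = [t for doc in docs for t in doc + [bos]]    # [1, 2, 3, 0, 4, 5, 0]
#   windows = [stream[i : i + seq_length] for i in range(0, len(stream), seq_length)]
#   full = [w for w in windows if len(w) == seq_length]  # [[1, 2, 3], [0, 4, 5]]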
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = {'streaming': True}
lowerCamelCase__ : Optional[Any] = load_dataset(args.dataset_name , split='train' , **_lowerCamelCase )
lowerCamelCase__ : int = ConstantLengthDataset(_lowerCamelCase , _lowerCamelCase , seq_length=args.seq_length )
lowerCamelCase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=args.batch_size )
return eval_dataloader
def lowerCamelCase_ ( _lowerCamelCase ):
model.eval()
lowerCamelCase__ : int = []
for step, batch in enumerate(_lowerCamelCase ):
with torch.no_grad():
lowerCamelCase__ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
lowerCamelCase__ : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowerCamelCase__ : Any = torch.mean(torch.cat(_lowerCamelCase ) )
try:
lowerCamelCase__ : Dict = torch.exp(_lowerCamelCase )
except OverflowError:
lowerCamelCase__ : Optional[Any] = float('inf' )
return loss.item(), perplexity.item()
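# Editor's note (hedged): the perplexity above is exp(mean cross-entropy loss);
# for example, a mean loss of math.log(2) ~= 0.693 corresponds to a perplexity
# of exactly 2.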
# Setup Accelerator
A_ : Optional[int] = Accelerator()
# Parse configuration
A_ : Any = HfArgumentParser(EvaluationArguments)
A_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
A_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
A_ : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
A_ : int = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
A_ : List[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
A_ : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
A_ , A_ : Optional[Any] = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs['labels'] = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 0 |
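# A hedged usage sketch for the Speech2Text processor above. The checkpoint name
# and the silent audio array are placeholders; the pattern shown -- `audio` for
# input features and `text` for labels in a single call -- is what __call__
# implements.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
speech = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16000, text="a transcription")
# inputs now carries the extracted features plus a "labels" entry with token ids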
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'longformer'
def __init__(self, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 2, lowerCamelCase_ = 1, lowerCamelCase_ = 0, lowerCamelCase_ = 2, lowerCamelCase_ = 3_0_5_2_2, lowerCamelCase_ = 7_6_8, lowerCamelCase_ = 1_2, lowerCamelCase_ = 1_2, lowerCamelCase_ = 3_0_7_2, lowerCamelCase_ = "gelu", lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 2, lowerCamelCase_ = 0.02, lowerCamelCase_ = 1e-12, lowerCamelCase_ = False, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : str = attention_window
lowerCamelCase__ : Union[str, Any] = sep_token_id
lowerCamelCase__ : List[str] = bos_token_id
lowerCamelCase__ : int = eos_token_id
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : Dict = onnx_export
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = "default", lowerCamelCase_ = None ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = True
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = super().outputs
if self.task == "default":
lowerCamelCase__ : Optional[Any] = {0: 'batch'}
return outputs
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
@property
def a__ (self ):
'''simple docstring'''
return max(super().default_onnx_opset, 1_4 )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCamelCase__ : Optional[Any] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
lowerCamelCase__ : List[Any] = 1
return inputs
| 720 |
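# A small sketch of the global-attention-mask convention that generate_dummy_inputs
# above relies on: zeros everywhere, with selected positions set to 1 so they
# receive global attention (here every second token, matching the code above).
# Shapes and the vocab size are illustrative.
import torch

input_ids = torch.randint(0, 30522, (1, 8))
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # make every second token global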
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% relative difference on a value of 10e8
        # yields an absolute difference of ~1, so it is not a good idea to compare the outputs with an absolute
        # tolerance. Here, we instead divide the expected result by the actual result in order to obtain a ratio
        # of ~1, and check that the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 696 | 0 |
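# The relative-tolerance check from the end of the MobileBERT integration test
# above, reduced to a standalone sketch with made-up numbers: divide expected by
# actual and require the ratio to stay within 1 +/- TOLERANCE.
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, 2.5, -0.17])
actual = torch.tensor([1.00005e8, 2.5001, -0.170001])
ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)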
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a_ ( snake_case_ ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'depth_multiplier' ) )
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3, lowerCamelCase_=3_2, lowerCamelCase_=0.25, lowerCamelCase_=8, lowerCamelCase_=True, lowerCamelCase_=1_0_2_4, lowerCamelCase_=3_2, lowerCamelCase_="relu6", lowerCamelCase_=0.1, lowerCamelCase_=0.02, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=1_0, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : int = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : List[Any] = depth_multiplier
lowerCamelCase__ : Optional[int] = min_depth
lowerCamelCase__ : Union[str, Any] = tf_padding
lowerCamelCase__ : Tuple = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Tuple = output_stride
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : str = classifier_dropout_prob
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowerCamelCase__ : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def a__ (self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = MobileNetVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Any = MobileNetVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : int = False
lowerCamelCase__ : int = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileNetVaModelTester(self )
lowerCamelCase__ : List[Any] = MobileNetVaConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV1 does not output attentions' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : int = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Any = 2_6
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Tuple = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Any = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : int = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 721 |
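# Shape arithmetic behind the MobileNetV1 model test above, using the tester's
# defaults: channel count scales with depth_multiplier, spatial dimensions shrink
# by output_stride, so the expected last_hidden_state is (batch, 256, 1, 1).
image_size, output_stride = 32, 32
base_channels, depth_multiplier = 1024, 0.25
last_hidden_size = int(base_channels * depth_multiplier)  # 256
spatial = image_size // output_stride                     # 1
print((last_hidden_size, spatial, spatial))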
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # note: args.batch_size here is actually num_return_sequences, not the dataloader batch size
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
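# What remove_last_block() above does, on a made-up generation: split on the EOF
# marker strings (keeping the separators via the capture group) and drop
# everything from the last marker onward, so a completion that starts a new
# function is truncated at the function boundary.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
generation = "    return a + b\ndef next_function():"
pieces = re.split("(%s)" % "|".join(EOF_STRINGS), generation)
print("".join(pieces[:-2]))  # "    return a + b"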
'''simple docstring'''
def nor_gate ( input_a , input_b ) -> int:
    return int(input_a == input_b == 0 )
def main ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 697 |
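# NOR is functionally complete: a short sketch deriving NOT, OR and AND from the
# nor_gate() defined above via standard identities (not part of the original file).
def not_gate(input_a: int) -> int:
    return nor_gate(input_a, input_a)

def or_gate(input_a: int, input_b: int) -> int:
    return not_gate(nor_gate(input_a, input_b))

def and_gate(input_a: int, input_b: int) -> int:
    return nor_gate(not_gate(input_a), not_gate(input_b))

assert not_gate(0) == 1 and or_gate(0, 1) == 1 and and_gate(1, 1) == 1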
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 697 | 1 |
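# A stripped-down numpy sketch of the stochastic sampler loop implemented above
# (Karras et al., Algorithm 2): bump the noise level to sigma_hat, take an Euler
# step to sigma_prev, then apply a second-order correction. `denoise` is a
# hypothetical placeholder for the trained model; the sigma schedule is illustrative.
import numpy as np

def denoise(x, sigma):  # placeholder denoiser, not the real network
    return x / (1.0 + sigma)

rng = np.random.default_rng(0)
x = rng.standard_normal(4) * 80.0  # x_0 ~ N(0, sigma_0^2 I)
for sigma, sigma_prev in [(80.0, 40.0), (40.0, 0.0)]:
    sigma_hat = sigma * 1.05  # temporarily increased noise level
    x_hat = x + np.sqrt(sigma_hat**2 - sigma**2) * rng.standard_normal(4)
    d = (x_hat - denoise(x_hat, sigma_hat)) / sigma_hat  # dx/dsigma at sigma_hat
    x = x_hat + (sigma_prev - sigma_hat) * d  # Euler step
    if sigma_prev != 0:
        d_prev = (x - denoise(x, sigma_prev)) / sigma_prev
        x = x_hat + (sigma_prev - sigma_hat) * 0.5 * (d + d_prev)  # 2nd order correction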
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    plt.scatter(X, y, color="""red""" )
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X ) ), color="""blue""" )
    plt.title("""Truth or Bluff (Polynomial Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003 | 697 |
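# Why PolynomialFeatures + LinearRegression amounts to polynomial regression, on
# a toy input: degree-4 features turn a scalar x into [1, x, x^2, x^3, x^4], and
# the linear model then fits one coefficient per power.
from sklearn.preprocessing import PolynomialFeatures

print(PolynomialFeatures(degree=4).fit_transform([[2.0]]))
# [[ 1.  2.  4.  8. 16.]]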
'''simple docstring'''
def check_bouncy ( num ) -> bool:
    if not isinstance(num , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(num )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution ( percent = 9_9 ) -> int:
    if not 0 < percent < 1_0_0:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
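# A bouncy number (Project Euler 112) has digits that are neither entirely
# non-decreasing nor entirely non-increasing. Quick sanity checks against
# check_bouncy() above:
for n, expected in [(134468, False), (66420, False), (155349, True)]:
    assert check_bouncy(n) == expected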
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    def __init__( self , list_of_points ):
        '''simple docstring'''
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t ):
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values : list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t ):
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size = 0.0_1 ):
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x : list[float] = []  # x coordinates of points to plot
        to_plot_y : list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 697 |
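# Standalone sanity check of the Bernstein-basis evaluation used by the class
# above: for a degree-1 curve through (1, 1) and (3, 5), the basis at t = 0.5 is
# [0.5, 0.5], so the curve point is the midpoint (2, 3).
from scipy.special import comb

points, t = [(1, 1), (3, 5)], 0.5
degree = len(points) - 1
basis = [comb(degree, i) * (1 - t) ** (degree - i) * t**i for i in range(len(points))]
x = sum(b * p[0] for b, p in zip(basis, points))
y = sum(b * p[1] for b, p in zip(basis, points))
assert (x, y) == (2.0, 3.0)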
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch ( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ) -> None:
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) | 697 | 1 |
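# Hedged command-line sketch for the conversion script above; the script filename
# and the paths are placeholders, not verified values.
#
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# --gpt2_config_file is optional; when omitted, a default GPT2Config() is used,
# exactly as the branch at the top of the conversion function shows.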
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : Tuple = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['LayoutLMv3FeatureExtractor']
__SCREAMING_SNAKE_CASE : Optional[Any] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
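# A minimal sketch of the lazy-import pattern this __init__ relies on: names in
# _import_structure are resolved only on first attribute access, so importing the
# package stays cheap and optional backends (torch, tf, vision) are only touched
# when actually used. Illustrative only; the real implementation is
# transformers.utils._LazyModule.
import importlib

class TinyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._attr_to_module = {v: k for k, vs in import_structure.items() for v in vs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)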
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
        ] , )
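

# A minimal usage sketch of the object-detection pipeline the slow tests above
# exercise. It assumes `transformers` and `torch` are installed, with network
# access to the "facebook/detr-resnet-50" checkpoint and the COCO image URL
# that the tests themselves use.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    detections = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9985
    )
    for detection in detections:
        print(detection["label"], detection["score"], detection["box"])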
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
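

# A small extra check, as a sketch under the same conventions as the demo above:
# both solvers should agree once the global memo table `f` is re-initialised for
# the new dimensions. The instance below is an illustrative assumption, not part
# of the original module.
def _demo_second_instance():
    global f
    wt2, val2, n2, w2 = [1, 3, 4, 5], [1, 4, 5, 7], 4, 7
    f = [[0] * (w2 + 1)] + [[0] + [-1] * (w2 + 1) for _ in range(n2 + 1)]
    bottom_up, _ = knapsack(w2, wt2, val2, n2)
    memoized = mf_knapsack(n2, wt2, val2, w2)
    assert bottom_up == memoized == 9  # items 2 and 3: weights 3 + 4, values 4 + 5


if __name__ == "__main__":
    _demo_second_instance()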
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    __SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
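

# A minimal sketch of what the lazy structure above provides at runtime:
# attribute access on the package triggers the real submodule import through
# `_LazyModule`. Assumes an installed `transformers` (plus `torch` if you pull
# in the model classes instead of the config).
if __name__ == "__main__":
    from transformers.models.blenderbot_small import BlenderbotSmallConfig

    config = BlenderbotSmallConfig()  # resolved lazily on first access
    print(config.model_type)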
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
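

# A non-interactive sketch of the generators above; call `_demo()` to try them
# without the interactive `main()`. The included-characters string "Ab1!" is an
# illustrative assumption.
def _demo():
    pwd = password_generator(12)
    alt = alternative_password_generator("Ab1!", 12)
    print(pwd, "strong:", is_strong_password(pwd))
    print(alt, "strong:", is_strong_password(alt))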
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
        return tk_t, lg_t
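

# The collate method above pads variable-length token id sequences into dense
# (bs, max_seq_len) and (bs,) tensors. A self-contained sketch of the same
# padding idea, kept separate from the mangled class names in this dump
# (`pad_idx` is an assumed pad token id; `torch` is imported at the top):
def _pad_batch(batch, pad_idx=0):
    token_ids = [t[0] for t in batch]
    lengths = [t[1] for t in batch]
    max_seq_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_seq_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)


# e.g. _pad_batch([([5, 6, 7], 3), ([5, 6], 2)]) -> shapes (2, 3) and (2,)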
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="resnet50" , __UpperCamelCase=3 , __UpperCamelCase=32 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = out_indices if out_indices is not None else [4]
__a : Optional[int] = stage_names
__a : Optional[Any] = out_features
__a : Optional[Any] = backbone
__a : List[Any] = batch_size
__a : Any = image_size
__a : Union[str, Any] = num_channels
__a : List[str] = use_pretrained_backbone
__a : List[str] = is_training
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Tuple = self.get_config()
return config, pixel_values
def __lowerCamelCase ( self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = TimmBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__a : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = self.prepare_config_and_inputs()
__a , __a : Optional[int] = config_and_inputs
__a : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (TimmBackbone,) if is_torch_available() else ()
lowercase__ = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = TimmBackboneModelTester(self )
__a : Any = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = """resnet18"""
__a : Optional[Any] = """microsoft/resnet-18"""
__a : Optional[Any] = AutoBackbone.from_pretrained(__UpperCamelCase , use_timm_backbone=__UpperCamelCase )
__a : Optional[int] = AutoBackbone.from_pretrained(__UpperCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__a : Optional[int] = AutoBackbone.from_pretrained(__UpperCamelCase , use_timm_backbone=__UpperCamelCase , out_indices=[1, 2, 3] )
__a : int = AutoBackbone.from_pretrained(__UpperCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = model_class(__UpperCamelCase )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Dict = self.has_attentions
# no need to test all models as different heads yield the same functionality
__a : Optional[int] = self.all_model_classes[0]
__a : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
__a : int = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
__a : Union[str, Any] = model(**__UpperCamelCase )
__a : Dict = outputs[0][-1]
# Encoder-/Decoder-only models
__a : Dict = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__a : Tuple = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__UpperCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Any = model(**__UpperCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__a : Dict = copy.deepcopy(__UpperCamelCase )
__a : int = None
__a : Optional[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Dict = model(**__UpperCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__a : Any = copy.deepcopy(__UpperCamelCase )
__a : Optional[int] = False
__a : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
            __a : List[Any] = model(**__UpperCamelCase )
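

# A minimal sketch of the timm-vs-transformers backbone loading exercised by
# the test above; assumes `transformers`, `timm`, and `torch` are installed and
# the "resnet18" / "microsoft/resnet-18" checkpoints are reachable.
if __name__ == "__main__":
    from transformers import AutoBackbone

    timm_backbone = AutoBackbone.from_pretrained(
        "resnet18", use_timm_backbone=True, out_indices=[1, 2, 3]
    )
    hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
    print(timm_backbone.channels, hf_backbone.channels)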
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
            return sorted(f["""name"""] for f in out )
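

# A hedged usage sketch written with the upstream names (in `datasets` this
# class is `HfFileSystem` and its methods keep their real names; the names in
# this dump are mangled). Network access and a real dataset repo are assumed.
#
# from huggingface_hub import HfApi
#
# fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
# print(fs.ls("", detail=False))   # list top-level files of the dataset repo
# with fs.open("README.md", mode="rb") as f:
#     print(f.read(64))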
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
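
# Quick sanity check (a sketch): 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27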
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
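

# A minimal depth-estimation sketch mirroring the slow integration test above;
# assumes `transformers`, `torch`, and access to the "Intel/dpt-hybrid-midas"
# checkpoint named there.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # the raw depth map checked above (possibly with a leading batch dimension)
    print(result["predicted_depth"].shape)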
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["audio_values", "audio_mask"]
def __init__( self , __UpperCamelCase=2048 , __UpperCamelCase=1 , __UpperCamelCase=[16, 16] , __UpperCamelCase=128 , __UpperCamelCase=4_4100 , __UpperCamelCase=86 , __UpperCamelCase=2048 , __UpperCamelCase=0.0 , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(
feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase , )
__a : List[str] = spectrogram_length
__a : str = num_channels
__a : Union[str, Any] = patch_size
__a : List[str] = feature_size // self.patch_size[1]
__a : Optional[int] = n_fft
__a : Any = sampling_rate // hop_length_to_sampling_rate
__a : Dict = sampling_rate
__a : Dict = padding_value
__a : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCamelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__UpperCamelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = spectrogram(
__UpperCamelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , )
__a : List[str] = log_spec[:, :-1]
__a : Union[str, Any] = log_spec - 2_0.0
__a : int = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : str = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Any = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : Dict = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Optional[int] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__a : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __UpperCamelCase ):
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__a : Optional[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__a : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__a : str = np.array(__UpperCamelCase ).astype(np.floataa )
# convert into correct format for padding
__a : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__a : int = np.ones([len(__UpperCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__a : Tuple = padded_audio_features * self.padding_value
for i in range(len(__UpperCamelCase ) ):
__a : List[Any] = audio_features[i]
__a : Dict = feature
# return as BatchFeature
if return_attention_mask:
__a : str = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
__a : Dict = {"""audio_values""": padded_audio_features}
__a : Union[str, Any] = BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
        return encoded_inputs
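

# A hedged usage sketch written with the upstream names (this is the TVLT audio
# feature extractor, `TvltFeatureExtractor`; the method names in this dump are
# mangled). One second of silence at the default 44.1 kHz sampling rate:
#
# import numpy as np
# from transformers import TvltFeatureExtractor
#
# extractor = TvltFeatureExtractor()
# audio = np.zeros(44100, dtype=np.float32)
# inputs = extractor(audio, sampling_rate=44100, return_tensors="np",
#                    return_attention_mask=True)
# print(inputs["audio_values"].shape, inputs["audio_mask"].shape)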
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
    unittest.main()
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=10 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=None , ):
'''simple docstring'''
__a : Optional[Any] = size if size is not None else {"""shortest_edge""": 18}
__a : str = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__a : Tuple = parent
__a : str = batch_size
__a : Optional[Any] = num_channels
__a : Optional[Any] = num_frames
__a : Dict = image_size
__a : str = min_resolution
__a : List[str] = max_resolution
__a : Optional[int] = do_resize
__a : List[Any] = size
__a : Union[str, Any] = do_normalize
__a : Optional[int] = image_mean
__a : Tuple = image_std
__a : Optional[Any] = crop_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = VivitImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = VivitImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__a : Tuple = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__a : Dict = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a : int = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : int = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__a : Any = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a : Dict = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
        ) , )
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands( number_of_hands = 1_0_0 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize("""hand, expected""" , TEST_FLUSH )
def test_hand_is_flush( hand , expected ):
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , TEST_STRAIGHT )
def test_hand_is_straight( hand , expected ):
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight( hand , expected , card_values ):
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , TEST_KIND )
def test_hand_is_same_kind( hand , expected ):
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , TEST_TYPES )
def test_hand_values( hand , expected ):
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , TEST_COMPARE )
def test_compare_simple( hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def test_compare_random( hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
    for _ in range(1_0 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , """poker_hands.txt""" )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 3_7_6 | 697 | 1 |
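# The random test above derives its expected label with an indexing trick:
# (play >= oppo) + (play > oppo) evaluates to 0 for a loss, 1 for a tie and
# 2 for a win, because SORTED_HANDS is ordered from weakest to strongest.
# A quick standalone check of that trick:
for play, oppo in [(0, 5), (3, 3), (7, 2)]:
    label = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    print(play, oppo, label)  # -> Loss, Tie, Win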
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ : # Public class to implement a graph
    def __init__( self , row , col , graph ):
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i , j , visited ):
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i , j , visited ):
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ): # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count | 697 |
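# Usage sketch for the class above: count the connected groups of 1s in a
# boolean grid, treating all eight neighbours of a cell as connected.
graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = SCREAMING_SNAKE_CASE__(5, 5, graph)
print(g.count_islands())  # 5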
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
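# The boilerplate above defers the heavy torch import until an attribute is
# actually accessed. A dependency-free sketch of the same idea using module
# __getattr__ (PEP 562); it only works inside a package, and the mapping
# below is illustrative:
import importlib
_lazy_targets = {"FocalNetConfig": ".configuration_focalnet"}
def __getattr__(name):
    if name in _lazy_targets:
        module = importlib.import_module(_lazy_targets[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)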
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE : Dict = open # noqa: we just need to have a builtin inside this module to test it properly | 697 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection , item ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection , item ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection , item , left , right ) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
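# Quick self-checks for the helpers above (using the fixed names):
data = [0, 5, 7, 10, 15]
assert bisect_left(data, 6) == 2
assert bisect_right(data, 5) == 2
assert binary_search(data, 15) == 4
assert binary_search_by_recursion(data, 5, 0, len(data) - 1) == 1
insort_left(data, 6)
assert data == [0, 5, 6, 7, 10, 15]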
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution( sides_number , dice_number ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
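# Sanity check for the frequency table above on a tiny case: two 2-sided
# dice produce the totals 2, 3, 3, 4, i.e. frequencies [0, 0, 1, 2, 1].
assert total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1]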
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__SCREAMING_SNAKE_CASE : Tuple = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
__a : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
__a : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
__a : Union[str, Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
__a : str = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
__a : List[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
__a : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__a : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="""utf-8""" ) as vocab_handle:
__a : Dict = json.load(__UpperCamelCase )
__a : Dict = {v: k for k, v in self.encoder.items()}
__a : List[Any] = errors # how to handle errors in decoding
__a : str = bytes_to_unicode()
__a : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding="""utf-8""" ) as merges_handle:
__a : Any = merges_handle.read().split("""\n""" )[1:-1]
__a : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__a : str = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__a : Any = {}
__a : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a : Optional[int] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__a : List[Any] = tuple(__UpperCamelCase )
__a : List[str] = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
__a : List[str] = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a : Dict = bigram
__a : int = []
__a : Union[str, Any] = 0
while i < len(__UpperCamelCase ):
try:
__a : str = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a : str = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a : List[Any] = tuple(__UpperCamelCase )
__a : Union[str, Any] = new_word
if len(__UpperCamelCase ) == 1:
break
else:
__a : Union[str, Any] = get_pairs(__UpperCamelCase )
__a : Tuple = """ """.join(__UpperCamelCase )
__a : int = word
return word
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = []
for token in re.findall(self.pat , __UpperCamelCase ):
__a : Optional[int] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(""" """ ) )
return bpe_tokens
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.decoder.get(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : str = """""".join(__UpperCamelCase )
__a : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : int = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__a : int = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + """\n""" )
__a : Any = 0
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
__a : List[Any] = token_index
writer.write(""" """.join(__UpperCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a : str = [self.cls_token_id]
__a : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
__a : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
__a : List[str] = """ """ + text
return (text, kwargs) | 697 |
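# The byte-level trick above maps every possible byte to a printable unicode
# character so BPE never sees raw control bytes. A round-trip demonstration
# using the module-level helper (assumes the fixed name bytes_to_unicode):
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}
encoded = "".join(byte_encoder[b] for b in "héllo".encode("utf-8"))
decoded = bytearray(byte_decoder[c] for c in encoded).decode("utf-8")
assert decoded == "héllo"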
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
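# The dx/dy arithmetic in the pipeline above centre-aligns a reference
# latent with a differently sized target so one seed yields similar images
# at new resolutions. A standalone sketch of that overlap computation
# (illustrative names, latent coordinates given as (height, width)):
def latent_overlap(target_hw, reference_hw):
    dy = (target_hw[0] - reference_hw[0]) // 2
    dx = (target_hw[1] - reference_hw[1]) // 2
    h = reference_hw[0] if dy >= 0 else reference_hw[0] + 2 * dy
    w = reference_hw[1] if dx >= 0 else reference_hw[1] + 2 * dx
    return max(dy, 0), max(dx, 0), h, w
print(latent_overlap((96, 96), (64, 64)))  # (16, 16, 64, 64)
print(latent_overlap((48, 48), (64, 64)))  # (0, 0, 48, 48)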
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ):
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
def __init__( self , __UpperCamelCase ):
'''simple docstring'''
super().__init__(__UpperCamelCase )
__a : Optional[int] = CLIPVisionModel(config.vision_config )
__a : str = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__UpperCamelCase )
__a : int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__UpperCamelCase )
__a : Union[str, Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__UpperCamelCase )
__a : Union[str, Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__UpperCamelCase )
__a : Union[str, Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__UpperCamelCase )
@torch.no_grad()
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = self.vision_model(__UpperCamelCase )[1] # pooled_output
__a : str = self.visual_projection(__UpperCamelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : Optional[Any] = cosine_distance(__UpperCamelCase , self.special_care_embeds ).cpu().float().numpy()
__a : Optional[Any] = cosine_distance(__UpperCamelCase , self.concept_embeds ).cpu().float().numpy()
__a : Optional[int] = []
__a : Tuple = image_embeds.shape[0]
for i in range(__UpperCamelCase ):
__a : Optional[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
__a : str = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__a : Dict = special_cos_dist[i][concept_idx]
__a : List[str] = self.special_care_embeds_weights[concept_idx].item()
__a : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
__a : Tuple = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
__a : List[str] = cos_dist[i][concept_idx]
__a : Optional[Any] = self.concept_embeds_weights[concept_idx].item()
__a : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__UpperCamelCase )
result.append(__UpperCamelCase )
__a : Dict = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = self.vision_model(__UpperCamelCase )[1] # pooled_output
__a : Tuple = self.visual_projection(__UpperCamelCase )
__a : int = cosine_distance(__UpperCamelCase , self.special_care_embeds )
__a : int = cosine_distance(__UpperCamelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__a : Dict = 0.0
__a : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__a : Any = torch.any(special_scores > 0 , dim=1 )
__a : List[str] = special_care * 0.0_1
__a : List[Any] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__a : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__a : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 697 |
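# The scoring above is plain cosine similarity between image embeddings and
# fixed per-concept embeddings, thresholded per concept. A toy version with
# random tensors (shapes illustrative):
import torch
import torch.nn as nn
def toy_scores(image_embeds, concept_embeds, thresholds, adjustment=0.0):
    sims = torch.mm(nn.functional.normalize(image_embeds),
                    nn.functional.normalize(concept_embeds).t())
    return sims - thresholds + adjustment  # positive => concept triggered
scores = toy_scores(torch.randn(2, 8), torch.randn(3, 8), torch.full((3,), 0.5))
print((scores > 0).any(dim=1))  # one flag per image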
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling( arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling( arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
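# Worked example for the two pooling functions above on a 4x4 input with
# size=2 and stride=2:
example = [[1, 2, 3, 4],
           [5, 6, 7, 8],
           [9, 10, 11, 12],
           [13, 14, 15, 16]]
print(maxpooling(example, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(example, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]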
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self , pa_table ):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table ) | 697 |
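# The builder above just round-trips pickled DataFrames into Arrow tables.
# The core conversion it relies on, in isolation:
import pandas as pd
import pyarrow as pa
df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
table = pa.Table.from_pandas(df)
print(table.schema)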
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits , classical_bits ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
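# With no gates before measurement the qubit stays in |0>, so the counts
# should come back as {'0': 1000}. A tiny helper to turn counts into
# probabilities (illustrative):
def counts_to_probabilities(counts):
    shots = sum(counts.values())
    return {state: n / shots for state, n in counts.items()}
print(counts_to_probabilities({"0": 1000}))  # {'0': 1.0}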
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 697 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
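# A tiny stand-alone illustration (with a toy dict) of the index swap done above:
# fairseq dictionaries place <s> at index 0 and <pad> at index 1, while the CTC
# tokenizer expects <pad> at 0, so the converter swaps the two entries before
# writing vocab.json.
vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4}
vocab["<pad>"], vocab["<s>"] = 0, 1
print(vocab)  # {'<s>': 1, '<pad>': 0, '</s>': 2, '<unk>': 3, '|': 4}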
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__SCREAMING_SNAKE_CASE : int = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=False , ) -> int:
output_path.parent.mkdir(parents=lowercase , exist_ok=lowercase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowercase , lowercase , f=output_path.as_posix() , input_names=lowercase , output_names=lowercase , dynamic_axes=lowercase , do_constant_folding=lowercase , use_external_data_format=lowercase , enable_onnx_checker=lowercase , opset_version=lowercase , )
else:
export(
lowercase , lowercase , f=output_path.as_posix() , input_names=lowercase , output_names=lowercase , dynamic_axes=lowercase , do_constant_folding=lowercase , opset_version=lowercase , )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase , lowercase = False ) -> int:
    __a : List[Any] = torch.floataa if fpaa else torch.floataa  # float16 when --fp16 is passed, float32 otherwise
if fpaa and torch.cuda.is_available():
__a : List[Any] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
__a : int = """cpu"""
__a : int = Path(lowercase )
# VAE DECODER
__a : List[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" )
__a : int = vae_decoder.config.latent_channels
# forward only through the decoder part
__a : Optional[int] = vae_decoder.decode
onnx_export(
lowercase , model_args=(
torch.randn(1 , lowercase , 2_5 , 2_5 ).to(device=lowercase , dtype=lowercase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowercase , )
del vae_decoder
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX') | 697 |
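# A minimal smoke-test sketch for the exported decoder using onnxruntime. The model
# path, the 4-channel latent (typical for Stable Diffusion VAEs) and the 8x spatial
# upsampling are assumptions here, not guaranteed by the script above.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")  # hypothetical path
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = session.run(["sample"], {"latent_sample": latent})
print(sample.shape)  # typically (1, 3, 200, 200)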
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
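# A self-contained sketch of the same experimental-warning decorator pattern with
# readable names; `experimental` and `add` are illustrative, not part of the code above.
import warnings
from functools import wraps

def experimental(fn):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def add(a, b):
    return a + b

add(1, 2)  # emits the UserWarning, then returns 3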
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 |
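# A quick hand-check of the determinant asserted in the test above: cofactor
# expansion along the first row of A = [[1, 2, 3], [2, 4, 5], [6, 7, 8]] gives
# det(A) = 1*(4*8 - 5*7) - 2*(2*8 - 5*6) + 3*(2*7 - 4*6) = -3 + 28 - 30 = -5.
a = [[1, 2, 3], [2, 4, 5], [6, 7, 8]]
det = (
    a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1])
    - a[0][1] * (a[1][0] * a[2][2] - a[1][2] * a[2][0])
    + a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0])
)
print(det)  # -5, matching assertEqual(-5, a.determinant())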
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
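# A stand-alone numpy sketch of the per-utterance mean/variance normalization that
# `_normalize_one` above performs: statistics come from the valid (un-padded) frames
# only, and padded frames are overwritten afterwards. Shapes, the epsilon guard and
# the padding value are illustrative assumptions.
import numpy as np

def normalize_one(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    x = x.astype(np.float32).copy()
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    x = (x - mean) / (std + 1e-5)
    x[input_length:] = padding_value
    return x

features = np.random.randn(100, 80).astype(np.float32)  # 100 frames, 80 mel bins
normalized = normalize_one(features, input_length=90)    # last 10 frames are padding
print(normalized[:90].mean(axis=0).round(3))             # ~0.0 per feature dimension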
'''simple docstring'''
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase ):
'''simple docstring'''
__a : Tuple = data
# Initialize hash values
__a : Any = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
__a : Optional[int] = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
__a : Dict = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__a : str = b"""\x80""" + (b"""\x00""" * (63 - (len(__UpperCamelCase ) + 8) % 64))
__a : List[Any] = struct.pack(""">Q""" , (len(__UpperCamelCase ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__a : Optional[Any] = list(struct.unpack(""">16L""" , __UpperCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
__a , __a , __a , __a , __a , __a , __a , __a : Optional[Any] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__a : Optional[Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__a : Any = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
                __a : Union[str, Any] = (
                    # message schedule: w[i] = (w[i-16] + sigma0 + w[i-7] + sigma1) mod 2**32
                    words[index - 16] + sa + words[index - 7] + sa
                ) % 0X1_00_00_00_00
# Compression
__a : List[str] = self.ror(__UpperCamelCase , 6 ) ^ self.ror(__UpperCamelCase , 11 ) ^ self.ror(__UpperCamelCase , 25 )
__a : str = (e & f) ^ ((~e & 0XFF_FF_FF_FF) & g)
__a : Optional[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_00_00_00_00
__a : List[Any] = self.ror(__UpperCamelCase , 2 ) ^ self.ror(__UpperCamelCase , 13 ) ^ self.ror(__UpperCamelCase , 22 )
__a : List[str] = (a & b) ^ (a & c) ^ (b & c)
__a : Optional[Any] = (sa + maj) % 0X1_00_00_00_00
__a , __a , __a , __a , __a , __a , __a , __a : Optional[int] = (
g,
f,
e,
((d + tempa) % 0X1_00_00_00_00),
c,
b,
a,
                    ((tempa + tempa) % 0X1_00_00_00_00),  # new a = (temp1 + temp2) mod 2**32 in the reference algorithm
)
__a : Any = [a, b, c, d, e, f, g, h]
# Modify final values
__a : Tuple = [
((element + mutated_hash_values[index]) % 0X1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
__a : int = """""".join([hex(__UpperCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
return 0XFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
__a : Dict = bytes("""Test String""" , """utf-8""" )
self.assertEqual(SHAaaa(__UpperCamelCase ).hash , hashlib.shaaaa(__UpperCamelCase ).hexdigest() )
def _snake_case ( ) -> None:
import doctest
doctest.testmod()
__a : str = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
__a : Optional[int] = parser.parse_args()
__a : Tuple = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
__a : List[Any] = f.read()
else:
__a : Union[str, Any] = bytes(lowercase , """utf-8""" )
print(SHAaaa(lowercase ).hash )
if __name__ == "__main__":
main() | 697 |
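# A quick worked example of the 32-bit rotate-right used by the compression loop
# above: bits shifted out on the right re-enter on the left.
def rotr32(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)

assert rotr32(0x12345678, 8) == 0x78123456  # low byte 0x78 wraps to the top
assert rotr32(0x00000001, 1) == 0x80000000  # the single set bit wraps around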
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
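# Worked example of the Archimedes formula above, F = rho * g * V: a fully submerged
# 0.5 m^3 body in fresh water (rho = 1000 kg/m^3) under standard gravity.
rho, g_std, volume = 1000.0, 9.80665, 0.5
print(rho * g_std * volume)  # 4903.325 N of buoyant force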
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase ):
lowercase__ = 1
@register_to_config
def __init__( self , __UpperCamelCase = 2000 , __UpperCamelCase = 0.1_5 , __UpperCamelCase = 0.0_1 , __UpperCamelCase = 1_3_4_8.0 , __UpperCamelCase = 1E-5 , __UpperCamelCase = 1 , ):
'''simple docstring'''
__a : Any = sigma_max
# setable values
__a : List[str] = None
self.set_sigmas(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
return sample
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None ):
'''simple docstring'''
__a : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__a : List[Any] = torch.linspace(1 , __UpperCamelCase , __UpperCamelCase , device=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None ):
'''simple docstring'''
__a : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
__a : Dict = sigma_max if sigma_max is not None else self.config.sigma_max
__a : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__UpperCamelCase , __UpperCamelCase )
__a : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__a : Union[str, Any] = torch.exp(torch.linspace(math.log(__UpperCamelCase ) , math.log(__UpperCamelCase ) , __UpperCamelCase ) )
__a : Dict = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
__a : Union[str, Any] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__a : Tuple = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__a : Tuple = timesteps.to(self.discrete_sigmas.device )
__a : Dict = self.discrete_sigmas[timesteps].to(sample.device )
__a : Dict = self.get_adjacent_sigma(__UpperCamelCase , __UpperCamelCase ).to(sample.device )
__a : Optional[int] = torch.zeros_like(__UpperCamelCase )
__a : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__a : Dict = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__a : Tuple = diffusion.unsqueeze(-1 )
__a : Any = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
__a : Any = randn_tensor(
sample.shape , layout=sample.layout , generator=__UpperCamelCase , device=sample.device , dtype=sample.dtype )
__a : Union[str, Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__a : int = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__UpperCamelCase , prev_sample_mean=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__a : Any = randn_tensor(sample.shape , layout=sample.layout , generator=__UpperCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__a : Any = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__a : Optional[int] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__a : Any = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__a : List[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__a : int = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__a : Union[str, Any] = step_size.unsqueeze(-1 )
__a : Any = sample + step_size * model_output
__a : List[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
__a : Dict = timesteps.to(original_samples.device )
__a : str = self.discrete_sigmas.to(original_samples.device )[timesteps]
__a : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__UpperCamelCase ) * sigmas[:, None, None, None]
)
__a : Any = noise + original_samples
return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps | 697 |
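# A small torch sketch of the geometric sigma schedule built in `set_sigmas` above:
# exp(linspace(log(sigma_min), log(sigma_max), N)) interpolates log-uniformly, so
# consecutive sigmas share a constant ratio. The values below are illustrative.
import math
import torch

sigma_min, sigma_max, num_steps = 0.01, 1348.0, 5
sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_steps))
print(sigmas)                    # geometric progression from 0.01 up to 1348.0
print(sigmas[1:] / sigmas[:-1])  # constant ratio (sigma_max / sigma_min) ** (1 / (num_steps - 1))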
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
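# A stand-alone sketch of the shortest-edge resize rule that the expected-value helper
# above mirrors: scale so the shorter side equals `shortest_edge`, keeping the aspect
# ratio (the longest-edge cap applied by the real processor is omitted here).
def shortest_edge_size(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_size(480, 640))  # (18, 24): the 480 side becomes 18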
'''simple docstring'''
from PIL import Image
def _snake_case ( lowercase , lowercase ) -> Image:
__a : List[Any] = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(lowercase ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(lowercase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
__SCREAMING_SNAKE_CASE : Dict = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png') | 697 |
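# Worked numbers for the contrast mapping above at level = 170:
# factor = 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85, so a pixel
# at 200 maps to int(128 + 4.85 * (200 - 128)) ≈ 477, which the 8-bit lookup then
# clips into 0..255.
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
print(round(factor, 2))                 # 4.85
print(int(128 + factor * (200 - 128)))  # 477 before clipping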
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
__a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase )
# start time
__a : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__a : str = time.time()
__a : Any = end_time - start_time
__a : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names
__SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def _snake_case ( lowercase ) -> Tuple:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__a : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__a : Optional[Any] = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__a : Dict = tokenized_examples.sequence_ids(lowercase )
__a : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__a : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__a : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__SCREAMING_SNAKE_CASE : List[Any] = default_data_collator
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any:
# Post-processing: we match the start logits and end logits to answers in the original context.
__a : List[str] = postprocess_qa_predictions(
examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__a : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase , label_ids=lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _snake_case ( lowercase ) -> Optional[int]:
return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : Tuple = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : str = timeit.default_timer()
__SCREAMING_SNAKE_CASE : Dict = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
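# A simplified sketch of how one start/end logit pair (as produced by the TRT engine
# above) maps back to an answer span; the real `postprocess_qa_predictions` also uses
# offset mappings, n-best lists and null answers. The logits below are toy values.
import numpy as np

start_logits = np.array([0.1, 2.5, 0.3, 0.2, 1.0])
end_logits = np.array([0.0, 0.4, 3.1, 0.2, 0.1])

best = (0, 0, -np.inf)
for s in np.argsort(start_logits)[::-1][:2]:    # top-k start candidates
    for e in np.argsort(end_logits)[::-1][:2]:  # top-k end candidates
        if s <= e and e - s + 1 <= 30:          # valid, length-capped span
            score = float(start_logits[s] + end_logits[e])
            if score > best[2]:
                best = (int(s), int(e), score)
print(best)  # (1, 2, 5.6): token span [1, 2] scores highest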
'''simple docstring'''
from __future__ import annotations
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , ) -> None:
__a : List[Any] = len(lowercase )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find every valid placement for
    # this row
for col in range(lowercase ):
        # We apply what we learned previously. First we check that the current
        # column is not already present in the board (possible_board), because a
        # repeated column means a vertical collision. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already
        # exist in their respective collision sets
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is true it means there is a collision, so we
        # continue to the next value in the for loop.
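        # Concrete check (illustrative, coordinates assumed): queens at
        # (row=1, col=3) and (row=3, col=5) collide on the 45º diagonal,
        # since 1 - 3 == 3 - 5 == -2.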
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If no collision was found we call the dfs function again with the
        # updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowercase , lowercase , )
def _snake_case ( lowercase ) -> None:
__a : list[list[str]] = []
depth_first_search([] , [] , [] , lowercase , lowercase )
# Print all the boards
for board in boards:
for column in board:
print(lowercase )
print("""""" )
print(len(lowercase ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 697 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
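            # Reading of the line above (descriptive): the sample is mapped from
            # [-1, 1] to [0, 1] before the UNet call, and the raw prediction is
            # rescaled by sigma_hat / 2.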
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 697 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(lowercase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowercase , lowercase , lowercase ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase , lowercase , lowercase ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowercase , lowercase , lowercase ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase , lowercase , lowercase ) , )
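# Worked trace (illustrative) for the scores used in main() below: the leaf
# pairs reduce under max to [90, 33, 65, 34423], those reduce under min to
# [33, 65], and the root max picks 65 as the optimal value.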
def _snake_case ( ) -> None:
__a : Any = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
__a : int = math.log(len(lowercase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , lowercase , lowercase , lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 697 |
'''simple docstring'''
def _snake_case ( lowercase ) -> bool:
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__a : str = str(lowercase )
__a : Any = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
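# Worked example (illustrative):
#   >>> check_bouncy(155349)
#   True    # ascending sort "134559" and descending sort "955431" both differ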
def _snake_case ( lowercase = 9_9 ) -> int:
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__a : List[str] = 0
__a : Union[str, Any] = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _snake_case ( lowercase , lowercase ) -> float:
__a : Optional[Any] = u
for i in range(1 , lowercase ):
__a : Any = temp * (u - i)
return temp
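# Worked example (illustrative): ucal(0.5, 3) evaluates the product
# u * (u - 1) * (u - 2) = 0.5 * (-0.5) * (-1.5) = 0.375, which Newton's
# forward-difference formula divides by i! for the i-th term.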
def _snake_case ( ) -> None:
    __a : Union[str, Any] = int(input("""enter the number of values: """ ) )
__a : list[list[float]] = []
for _ in range(lowercase ):
y.append([] )
for i in range(lowercase ):
for j in range(lowercase ):
y[i].append(lowercase )
__a : Tuple = 0
print("""enter the values of parameters in a list: """ )
__a : List[str] = list(map(lowercase , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(lowercase ):
__a : Optional[Any] = float(input() )
__a : str = int(input("""enter the value to interpolate: """ ) )
__a : str = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowercase ):
for j in range(n - i ):
__a : List[Any] = y[j + 1][i - 1] - y[j][i - 1]
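    # Illustrative table (values assumed): for y-column [1, 8, 27, 64] the first
    # forward differences are [7, 19, 37], the second [12, 18], the third [6].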
__a : Optional[Any] = y[0][0]
for i in range(1 , lowercase ):
summ += (ucal(lowercase , lowercase ) * y[0][i]) / math.factorial(lowercase )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main() | 697 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
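# Note (inferred from the code above): the converter leaves two artifacts in
# pytorch_dump_folder_path, the state dict under WEIGHTS_NAME and the JSON
# config under CONFIG_NAME.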
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path) | 697 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = self.dummy_uncond_unet
__a : Optional[Any] = PNDMScheduler()
__a : List[Any] = PNDMPipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
pndm.to(__UpperCamelCase )
pndm.set_progress_bar_config(disable=__UpperCamelCase )
__a : Any = torch.manual_seed(0 )
__a : Optional[int] = pndm(generator=__UpperCamelCase , num_inference_steps=20 , output_type="""numpy""" ).images
__a : Dict = torch.manual_seed(0 )
__a : List[str] = pndm(generator=__UpperCamelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
__a : Any = image[0, -3:, -3:, -1]
__a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : str = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """google/ddpm-cifar10-32"""
__a : int = UNetaDModel.from_pretrained(__UpperCamelCase )
__a : List[str] = PNDMScheduler()
__a : Tuple = PNDMPipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
pndm.to(__UpperCamelCase )
pndm.set_progress_bar_config(disable=__UpperCamelCase )
__a : Optional[int] = torch.manual_seed(0 )
__a : Optional[int] = pndm(generator=__UpperCamelCase , output_type="""numpy""" ).images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : int = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 697 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _snake_case ( lowercase , lowercase ) -> List[str]:
__a : Any = XCLIPTextConfig()
# derive patch size from model name
__a : str = model_name.find("""patch""" )
__a : Dict = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
__a : Dict = XCLIPVisionConfig(patch_size=lowercase , num_frames=lowercase )
if "large" in model_name:
__a : Optional[Any] = 7_6_8
__a : int = 3_0_7_2
__a : Union[str, Any] = 1_2
__a : List[Any] = 1_0_2_4
__a : str = 4_0_9_6
__a : Any = 1_6
__a : Optional[Any] = 2_4
__a : str = 7_6_8
__a : Any = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__a : List[str] = 3_3_6
__a : str = XCLIPConfig.from_text_vision_configs(lowercase , lowercase )
if "large" in model_name:
__a : List[Any] = 7_6_8
return config
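# Worked example (illustrative): for model_name "xclip-base-patch32-16-frames"
# the two characters after "patch" give patch_size = 32; num_frames is supplied
# by the caller (8, 16 or 32 depending on the checkpoint name).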
def _snake_case ( lowercase ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
__a : Optional[Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
__a : int = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
__a : List[str] = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__a : Tuple = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__a : str = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__a : List[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
__a : List[str] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
__a : Optional[Any] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
__a : Any = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
__a : Tuple = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
__a : Any = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
__a : str = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
__a : Optional[int] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
__a : int = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
__a : Union[str, Any] = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
__a : Tuple = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
__a : Optional[int] = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
__a : Union[str, Any] = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
__a : Union[str, Any] = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
__a : str = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
__a : Optional[int] = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
__a : Union[str, Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
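# Worked example (illustrative key, not taken from a real checkpoint): applying
# the rules above, "visual.transformer.resblocks.0.ln_1.weight" becomes
# "vision_model.encoder.layers.0.layer_norm1.weight".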
def _snake_case ( lowercase , lowercase ) -> Any:
for key in orig_state_dict.copy().keys():
__a : int = orig_state_dict.pop(lowercase )
if "attn.in_proj" in key:
__a : Union[str, Any] = key.split(""".""" )
if key.startswith("""visual""" ):
__a : Dict = key_split[3]
__a : List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__a : Union[str, Any] = val[
:dim, :
]
__a : Optional[int] = val[
dim : dim * 2, :
]
__a : str = val[
-dim:, :
]
else:
__a : Optional[int] = val[
:dim
]
__a : Optional[Any] = val[
dim : dim * 2
]
__a : List[str] = val[
-dim:
]
else:
if "weight" in key:
__a : Any = val[
:dim, :
]
__a : str = val[
dim : dim * 2, :
]
__a : Tuple = val[
-dim:, :
]
else:
__a : Dict = val[:dim]
__a : Dict = val[
dim : dim * 2
]
__a : List[str] = val[-dim:]
elif key.startswith("""mit""" ):
__a : int = key_split[2]
__a : int = config.vision_config.mit_hidden_size
if "weight" in key:
__a : Union[str, Any] = val[:dim, :]
__a : int = val[dim : dim * 2, :]
__a : Tuple = val[-dim:, :]
else:
__a : Optional[int] = val[:dim]
__a : Optional[int] = val[dim : dim * 2]
__a : Any = val[-dim:]
else:
__a : List[str] = key_split[2]
__a : Optional[Any] = config.text_config.hidden_size
if "weight" in key:
__a : Optional[int] = val[:dim, :]
__a : Any = val[
dim : dim * 2, :
]
__a : Any = val[-dim:, :]
else:
__a : Any = val[:dim]
__a : Tuple = val[
dim : dim * 2
]
__a : Tuple = val[-dim:]
else:
__a : Union[str, Any] = rename_key(lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__a : List[Any] = val.T
__a : List[Any] = val
return orig_state_dict
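# Note (inferred from the slicing above): a fused "attn.in_proj" weight of
# shape (3 * dim, dim) is assumed to stack query, key and value rows, so it is
# split row-wise into three (dim, dim) projections; biases are split the same
# way along their only axis.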
def _snake_case ( lowercase ) -> Optional[Any]:
if num_frames == 8:
__a : List[str] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 1_6:
__a : List[str] = """eating_spaghetti.npy"""
elif num_frames == 3_2:
__a : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
__a : Optional[int] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowercase , repo_type="""dataset""" , )
__a : int = np.load(lowercase )
return list(lowercase )
def _snake_case ( lowercase , lowercase=None , lowercase=False ) -> Union[str, Any]:
__a : Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
__a : Optional[int] = model_to_url[model_name]
__a : List[str] = 8
if "16-frames" in model_name:
__a : Optional[int] = 1_6
elif "shot" in model_name:
__a : Optional[int] = 3_2
__a : int = get_xclip_config(lowercase , lowercase )
__a : Any = XCLIPModel(lowercase )
model.eval()
if "drive" in checkpoint_url:
__a : Union[str, Any] = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__a : Optional[int] = torch.load(lowercase , map_location="""cpu""" )["""model"""]
else:
__a : Union[str, Any] = torch.hub.load_state_dict_from_url(lowercase )["""model"""]
__a : Union[str, Any] = convert_state_dict(lowercase , lowercase )
__a : List[str] = XCLIPModel(lowercase )
__a , __a : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__a : List[str] = 3_3_6 if model_name == """xclip-large-patch14-16-frames""" else 2_2_4
__a : Optional[int] = VideoMAEImageProcessor(size=lowercase )
__a : str = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
__a : Optional[Any] = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
__a : int = XCLIPProcessor(image_processor=lowercase , tokenizer=lowercase )
__a : Dict = prepare_video(lowercase )
__a : List[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowercase , return_tensors="""pt""" , padding=lowercase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
__a : Optional[int] = model(**lowercase )
# Verify outputs
__a : Tuple = outputs.logits_per_video
__a : Optional[int] = logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
__a : str = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
__a : Tuple = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__a : Optional[int] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
__a : Tuple = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__a : str = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
__a : Dict = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__a : List[Any] = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__a : Optional[Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__a : Tuple = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__a : int = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__a : Union[str, Any] = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__a : Optional[Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__a : List[Any] = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__a : Dict = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__a : Optional[Any] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__a : List[str] = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__a : List[str] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__a : List[str] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(lowercase , lowercase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = PegasusTokenizer
lowercase__ = PegasusTokenizerFast
lowercase__ = True
lowercase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a : Optional[Any] = PegasusTokenizer(__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """</s>"""
__a : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(__UpperCamelCase ) , 1103 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__a : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
__a : Any = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
__a : List[str] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0]
__a : str = py_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__a : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
__a : Optional[int] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
__a : str = tokenizer([raw_input_str] , return_tensors=__UpperCamelCase ).input_ids[0]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
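        # Descriptive reading of the asserts above: the SentencePiece ids are
        # shifted by offset = 103 so the low ids can host specials such as
        # <pad>, </s>, <mask_1> and <mask_2>, hence the underlying unk id 2
        # maps to 103 + 2 = 105.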
__a : Optional[Any] = """To ensure a smooth flow of bank resolutions."""
__a : Dict = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
__a : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=__UpperCamelCase ).input_ids[0]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = ["""This is going to be way too long.""" * 150, """short example"""]
__a : Dict = ["""not super long but more than 5 tokens""", """tiny"""]
__a : str = self._large_tokenizer(__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" )
__a : Dict = self._large_tokenizer(
text_target=__UpperCamelCase , max_length=5 , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCamelCase ) == 2 # input_ids, attention_mask.
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = PegasusTokenizer
lowercase__ = PegasusTokenizerFast
lowercase__ = True
lowercase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a : str = PegasusTokenizer(__UpperCamelCase , offset=0 , mask_token_sent=__UpperCamelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__a : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
__a : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
__a : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0]
__a : Tuple = py_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
__a : Union[str, Any] = ["""not super long but more than 5 tokens""", """tiny"""]
__a : List[str] = self._large_tokenizer(__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" )
__a : List[Any] = self._large_tokenizer(
text_target=__UpperCamelCase , max_length=5 , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCamelCase ) == 2 # input_ids, attention_mask.
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
__a : Optional[int] = self._large_tokenizer(__UpperCamelCase ).input_ids
self.assertListEqual(
__UpperCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , ) | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
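        # Worked example (illustrative): divide_chunks([1, 2, 3, 4, 5], 2)
        # returns [[1, 2], [3, 4], [5]].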
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
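        # Illustrative (values assumed): with pad_idx = 0 and batch lengths
        # [3, 5], the 3-token sequence gains two trailing pad ids so every row
        # reaches max_seq_len_ = 5.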
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "levit"
def __init__( self , __UpperCamelCase=224 , __UpperCamelCase=3 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=16 , __UpperCamelCase=[128, 256, 384] , __UpperCamelCase=[4, 8, 12] , __UpperCamelCase=[4, 4, 4] , __UpperCamelCase=[16, 16, 16] , __UpperCamelCase=0 , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(**__UpperCamelCase )
__a : int = image_size
__a : str = num_channels
__a : str = kernel_size
__a : Tuple = stride
__a : List[Any] = padding
__a : Optional[Any] = hidden_sizes
__a : str = num_attention_heads
__a : Optional[Any] = depths
__a : Union[str, Any] = key_dim
__a : List[str] = drop_path_rate
__a : Union[str, Any] = patch_size
__a : Optional[Any] = attention_ratio
__a : List[str] = mlp_ratio
__a : Union[str, Any] = initializer_range
__a : Optional[int] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4 | 697 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
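    # Worked example (illustrative sibling name): a repo file "data/train.csv"
    # produces a file entry for "data/train.csv" plus a synthetic directory
    # entry for "data" in the cache built above.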
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
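# A standalone sketch of the directory-cache construction used above: one
# "file" entry per sibling, plus "directory" entries for every parent path
# (the sibling names here are illustrative, not from a real repository).
from pathlib import PurePosixPath

siblings = ["data/train.csv", "README.md"]
dir_cache = {}
for rfilename in siblings:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    dir_cache.update(
        {str(d): {"name": str(d), "size": None, "type": "directory"}
         for d in list(PurePosixPath(rfilename).parents)[:-1]}
    )
print(sorted(dir_cache))  # ['README.md', 'data', 'data/train.csv']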
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator( ) -> Generator[int, None, None]:
    __a , __b = 0, 1
    while True:
        __a , __b = __b, __a + __b
        yield __b
def solution( lowercase = 1_0_0_0 ) -> int:
    __a : int = 1
    __b : Generator[int, None, None] = fibonacci_generator()
    while len(str(next(__b ) ) ) < lowercase:
        __a += 1
    return __a + 1
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
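# A quick check of solution() above: 144 is the first Fibonacci term with
# three digits, and it is the 12th term of the sequence (with F1 = F2 = 1).
assert solution(3) == 12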
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) )
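# A minimal post-processing sketch for the depth map verified above: rescale
# the (1, H, W) prediction to 0-255 for visualization. The min-max
# normalization is a common convention, not taken from the test file itself.
import numpy as np
import torch

def depth_to_uint8(predicted_depth: torch.Tensor) -> np.ndarray:
    depth = predicted_depth.squeeze().detach().cpu().numpy()
    depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)
    return (depth * 255).astype(np.uint8)

print(depth_to_uint8(torch.rand(1, 384, 384)).shape)  # (384, 384)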
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__SCREAMING_SNAKE_CASE : Optional[Any] = 16
__SCREAMING_SNAKE_CASE : Union[str, Any] = 32
def _snake_case ( lowercase , lowercase = 1_6 ) -> Optional[int]:
__a : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__a : Tuple = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
__a : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a : Optional[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a : List[Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a : Union[str, Any] = 1_6
elif accelerator.mixed_precision != "no":
__a : str = 8
else:
__a : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="""longest""" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__a : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
__a : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__SCREAMING_SNAKE_CASE : int = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase ) == "1":
__a : Any = 2
# New Code #
__a : List[Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
__a : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Union[str, Any] = config["""lr"""]
__a : List[str] = int(config["""num_epochs"""] )
__a : Dict = int(config["""seed"""] )
__a : Optional[int] = int(config["""batch_size"""] )
__a : int = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase )
__a , __a : str = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
__a : Union[str, Any] = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
__a : List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : Dict = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
__a : Union[str, Any] = model(**lowercase )
__a : Tuple = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a : Union[str, Any] = model(**lowercase )
__a : List[str] = outputs.logits.argmax(dim=-1 )
__a , __a : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
__a : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowercase )
def _snake_case ( ) -> Optional[Any]:
__a : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase , default=lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__a : int = parser.parse_args()
__a : Union[str, Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase , lowercase )
if __name__ == "__main__":
    main()
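# For reference, a self-contained sketch of the plain-PyTorch pattern that
# `accelerator.accumulate` replaces above: scale the loss, let gradients add
# up across micro-batches, and only step the optimizer every N batches
# (`tiny_model` and the random data are purely illustrative).
import torch

tiny_model = torch.nn.Linear(4, 1)
tiny_optimizer = torch.optim.SGD(tiny_model.parameters(), lr=0.1)
accum_steps = 2
for step in range(4):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(tiny_model(x), y) / accum_steps
    loss.backward()  # gradients accumulate until the optimizer step
    if (step + 1) % accum_steps == 0:
        tiny_optimizer.step()
        tiny_optimizer.zero_grad()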
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
    unittest.main()
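# A standalone cofactor-expansion determinant that reproduces the 3x3 value
# asserted above: Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]]) has determinant -5.
def det(matrix):
    if len(matrix) == 1:
        return matrix[0][0]
    return sum(
        (-1) ** col * matrix[0][col] * det([row[:col] + row[col + 1 :] for row in matrix[1:]])
        for col in range(len(matrix))
    )

assert det([[1, 2, 3], [2, 4, 5], [6, 7, 8]]) == -5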
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowercase__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(default=__UpperCamelCase , metadata={"help": "The input training data file (a text file)."} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.train_file is not None:
__a : Dict = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__a : Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
def __call__( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = """label""" if """label""" in features[0].keys() else """labels"""
__a : Union[str, Any] = [feature.pop(__UpperCamelCase ) for feature in features]
__a : str = len(__UpperCamelCase )
__a : Dict = len(features[0]["""input_ids"""] )
__a : str = [
[{k: v[i] for k, v in feature.items()} for i in range(__UpperCamelCase )] for feature in features
]
__a : List[Any] = list(chain(*__UpperCamelCase ) )
__a : int = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
__a : Optional[int] = {k: v.view(__UpperCamelCase , __UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
__a : List[str] = torch.tensor(__UpperCamelCase , dtype=torch.intaa )
return batch
def _snake_case ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a : Dict = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__a : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__a : List[str] = {}
if data_args.train_file is not None:
__a : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__a : Dict = data_args.validation_file
__a : Tuple = data_args.train_file.split(""".""" )[-1]
__a : Dict = load_dataset(
lowercase , data_files=lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__a : str = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__a : int = [F"""ending{i}""" for i in range(4 )]
__a : Dict = """sent1"""
__a : Optional[Any] = """sent2"""
if data_args.max_seq_length is None:
__a : Dict = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__a : Tuple = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__a : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase ):
__a : Optional[int] = [[context] * 4 for context in examples[context_name]]
__a : Optional[int] = examples[question_header_name]
__a : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase )
]
# Flatten out
__a : Tuple = list(chain(*lowercase ) )
__a : Tuple = list(chain(*lowercase ) )
# Tokenize
__a : Union[str, Any] = tokenizer(
lowercase , lowercase , truncation=lowercase , max_length=lowercase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__a : str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__a : Optional[int] = min(len(lowercase ) , data_args.max_train_samples )
__a : int = train_dataset.select(range(lowercase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__a : Tuple = train_dataset.map(
lowercase , batched=lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__a : Optional[Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__a : int = min(len(lowercase ) , data_args.max_eval_samples )
__a : int = eval_dataset.select(range(lowercase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__a : Union[str, Any] = eval_dataset.map(
lowercase , batched=lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__a : Any = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase ):
__a , __a : List[Any] = eval_predictions
__a : Optional[Any] = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__a : List[Any] = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , compute_metrics=lowercase , )
# Training
if training_args.do_train:
__a : List[Any] = None
if training_args.resume_from_checkpoint is not None:
__a : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a : List[Any] = last_checkpoint
__a : Optional[Any] = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
__a : str = train_result.metrics
__a : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__a : Dict = min(lowercase , len(lowercase ) )
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__a : Union[str, Any] = trainer.evaluate()
__a : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__a : Union[str, Any] = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
__a : List[Any] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _snake_case ( lowercase ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
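# A shape-only sketch of the flatten -> tokenize -> unflatten trick used by
# preprocess_function and the collator above (dummy strings, no tokenizer):
# every example contributes 4 (context, ending) pairs, tokenized as one flat
# batch of 4 * N sequences and regrouped into fours afterwards.
from itertools import chain

contexts = [["ctx_a"] * 4, ["ctx_b"] * 4]  # 2 examples x 4 candidate endings
flat = list(chain(*contexts))              # 8 sequences go to the tokenizer
regrouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
assert regrouped == contexts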
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
__a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
    assert answer == 3_7_6
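# The random-hand generator above derives the expected outcome from positions
# in the sorted-hands tuple with the indexing trick (play >= oppo) + (play > oppo),
# which maps to 0 (Loss), 1 (Tie) or 2 (Win):
for play, oppo in [(0, 5), (3, 3), (7, 2)]:
    print(["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)])  # Loss, Tie, Win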
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = AlbertTokenizer
lowercase__ = AlbertTokenizerFast
lowercase__ = True
lowercase__ = True
lowercase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a : Optional[int] = AlbertTokenizer(__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = """this is a test"""
__a : Optional[int] = """this is a test"""
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """<pad>"""
__a : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__UpperCamelCase ) , 3_0000 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : int = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : Optional[Any] = """I was born in 92000, and this is falsé."""
__a : Any = tokenizer.tokenize(__UpperCamelCase )
__a : List[Any] = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : Optional[int] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
__a : Any = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : Union[str, Any] = self.get_rust_tokenizer()
__a : Union[str, Any] = tokenizer.encode(__UpperCamelCase )
__a : List[str] = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = AlbertTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
__a : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [48, 25, 21, 1289] )
__a : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
__a : Tuple = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
__a : Any = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = AlbertTokenizer(__UpperCamelCase )
__a : Any = tokenizer.encode("""sequence builders""" )
__a : str = tokenizer.encode("""multi-sequence build""" )
__a : List[str] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
__a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
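# A minimal sketch of the special-token framing asserted in the sequence
# builders test above (assumes network access to download the public
# albert-base-v2 tokenizer).
from transformers import AlbertTokenizer

albert_tok = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = albert_tok.build_inputs_with_special_tokens(
    albert_tok.encode("sequence builders", add_special_tokens=False)
)
tokens = albert_tok.convert_ids_to_tokens(ids)
print(tokens[0], tokens[-1])  # [CLS] [SEP]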
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
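# From a user's perspective the lazy module above is transparent: the heavy,
# torch-backed import only happens on first attribute access (this sketch
# assumes torch and a transformers release that ships FocalNet are installed).
from transformers import FocalNetConfig, FocalNetModel

focalnet = FocalNetModel(FocalNetConfig())  # randomly initialized weights
print(focalnet.config.model_type)  # "focalnet"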