code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
from string import ascii_lowercase, ascii_uppercase
def __UpperCamelCase ( lowerCAmelCase__ : Tuple ):
if not sentence:
return ""
__a : int = dict(zip(_A , _A ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 216 |
__magic_name__: str = [0, 2, 4, 6, 8]
__magic_name__: Optional[int] = [1, 3, 5, 7, 9]
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__magic_name__ : List[Any] = 0
for digit in range(10 ):
__magic_name__ : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, _A, _A )
return result
__magic_name__ : str = 0
for digita in range(10 ):
__magic_name__ : Optional[Any] = digita
if (remainder + digita) % 2 == 0:
__magic_name__ : Tuple = ODD_DIGITS
else:
__magic_name__ : str = EVEN_DIGITS
for digita in other_parity_digits:
__magic_name__ : Tuple = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, _A, _A, )
return result
def UpperCamelCase ( _A = 9 ):
"""simple docstring"""
__magic_name__ : List[str] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(_A, 0, [0] * length, _A )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a ( _lowerCAmelCase ):
def UpperCamelCase_ ( self ):
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'depth_multiplier' ) )
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3 , _lowerCamelCase=3_2 , _lowerCamelCase=0.2_5 , _lowerCamelCase=8 , _lowerCamelCase=True , _lowerCamelCase=1_0_2_4 , _lowerCamelCase=3_2 , _lowerCamelCase="relu6" , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=1_0 , _lowerCamelCase=None , ):
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = depth_multiplier
lowercase = min_depth
lowercase = tf_padding
lowercase = int(last_hidden_size * depth_multiplier )
lowercase = output_stride
lowercase = hidden_act
lowercase = classifier_dropout_prob
lowercase = use_labels
lowercase = is_training
lowercase = num_labels
lowercase = initializer_range
lowercase = scope
def UpperCamelCase_ ( self ):
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = self.num_labels
lowercase = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ):
lowercase = self.prepare_config_and_inputs()
lowercase = config_and_inputs
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCAmelCase, _lowerCAmelCase, unittest.TestCase ):
UpperCAmelCase_ : List[str] =(MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCAmelCase_ : Dict =(
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Any =False
UpperCAmelCase_ : str =False
UpperCAmelCase_ : str =False
UpperCAmelCase_ : Dict =False
def UpperCamelCase_ ( self ):
lowercase = MobileNetVaModelTester(self )
lowercase = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason='MobileNetV1 does not output attentions' )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowerCAmelCase__ )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowercase = outputs.hidden_states
lowercase = 2_6
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def UpperCamelCase_ ( self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self ):
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self ):
lowercase = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(lowerCAmelCase__ )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
# verify the logits
lowercase = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 220 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__magic_name__ : int = sorted(string.lower() )
return len(_A ) == len(set(_A ) )
if __name__ == "__main__":
__magic_name__: Dict = input("Enter a string ").strip()
__magic_name__: Union[str, Any] = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 342 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__magic_name__ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__magic_name__ = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE = bs[:]
__SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE = [chr(_A ) for n in cs]
return dict(zip(_A , _A ) )
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = set()
__SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE = char
return pairs
class SCREAMING_SNAKE_CASE_ ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase : Tuple = VOCAB_FILES_NAMES
__lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""") as vocab_handle:
__SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE = bytes_to_unicode()
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""") as merges_handle:
__SCREAMING_SNAKE_CASE = merges_handle.read().split("""\n""")[1:-1]
__SCREAMING_SNAKE_CASE = [tuple(merge.split()) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case_ ( self):
return len(self.encoder)
def snake_case_ ( self):
return dict(self.encoder , **self.added_tokens_encoder)
def snake_case_ ( self , lowerCAmelCase__):
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE = tuple(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""")))
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE = bigram
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
while i < len(lowerCAmelCase__):
try:
__SCREAMING_SNAKE_CASE = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__SCREAMING_SNAKE_CASE = tuple(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = new_word
if len(lowerCAmelCase__) == 1:
break
else:
__SCREAMING_SNAKE_CASE = get_pairs(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """ """.join(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = word
return word
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(""" """))
return bpe_tokens
def snake_case_ ( self , lowerCAmelCase__):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def snake_case_ ( self , lowerCAmelCase__):
return self.decoder.get(lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """""".join(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + """\n""")
__SCREAMING_SNAKE_CASE = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""")
__SCREAMING_SNAKE_CASE = token_index
writer.write(""" """.join(lowerCAmelCase__) + """\n""")
index += 1
return vocab_file, merge_file
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE = """ """ + text
return (text, kwargs)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
return token_ids_a + [self.eos_token_id]
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text)
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """ """.join(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.encode(lowerCAmelCase__)
if len(lowerCAmelCase__) > self.model_max_length:
__SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
return input_ids
| 100 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 342 | 0 |
import itertools
import math
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 2
while True:
if is_prime(_A ):
yield num
num += 1
def _SCREAMING_SNAKE_CASE ( lowercase : Dict = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , _A ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 204 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = do_resize
__magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88}
__magic_name__ : Union[str, Any] = size_divisor
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : Dict = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : List[str] = do_center_crop
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Tuple = do_pad
__magic_name__ : int = batch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : int = min_resolution
__magic_name__ : str = max_resolution
def __magic_name__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
if not batched:
__magic_name__ : Dict = self.size["""shortest_edge"""]
__magic_name__ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__magic_name__ ,__magic_name__ : List[Any] = image.size
else:
__magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2]
__magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__magic_name__ ,__magic_name__ : str = size, scale * w
else:
__magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size
__magic_name__ : Tuple = int((13_33 / 8_00) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = newh * scale
__magic_name__ : Any = neww * scale
__magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ ,__magic_name__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ : Union[str, Any] = []
for image in image_inputs:
__magic_name__ ,__magic_name__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
__magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = BridgeTowerImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Any:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def __magic_name__ ( self ) -> Optional[int]:
pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 342 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : int = logging.get_logger(__name__)
A__ : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
A__ : Tuple = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
A__ : Tuple = {
"gpt2": 10_24,
"gpt2-medium": 10_24,
"gpt2-large": 10_24,
"gpt2-xl": 10_24,
"distilgpt2": 10_24,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
    """Fast (Rust-backed) GPT-2 byte-level BPE tokenizer wrapper.

    NOTE(review): this file looks machine-obfuscated — all class attributes and
    methods share the name ``lowercase__`` (later definitions shadow earlier
    ones) and several signatures repeat the parameter name ``lowerCamelCase``,
    which is a Python SyntaxError.  The comments below describe the *intended*
    behaviour as visible from the code; confirm against the original source.
    """

    # Class-level tokenizer metadata (file names, download map, max lengths,
    # model input names, slow-tokenizer class).  All bound to the same
    # obfuscated name — see NOTE(review) above.
    lowercase__ = VOCAB_FILES_NAMES
    lowercase__ = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__ = ['''input_ids''', '''attention_mask''']
    lowercase__ = GPTaTokenizer

    def __init__( self : str, lowerCamelCase : str=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=None, lowerCamelCase : Union[str, Any]="<|endoftext|>", lowerCamelCase : str="<|endoftext|>", lowerCamelCase : int="<|endoftext|>", lowerCamelCase : Optional[int]=False, **lowerCamelCase : str, ):
        '''Initialise the backend tokenizer and re-sync its ``add_prefix_space``
        pre-tokenizer setting when the requested value differs from the one
        serialised in ``tokenizer_file``.'''
        super().__init__(
            lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, unk_token=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, **lowerCAmelCase__, )
        lowercase__ = kwargs.pop('''add_bos_token''', lowerCAmelCase__ )
        # Inspect the serialised pre-tokenizer state to detect a mismatching
        # add_prefix_space flag; rebuild the pre-tokenizer if needed.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''', lowerCAmelCase__ ) != add_prefix_space:
            lowercase__ = getattr(lowerCAmelCase__, pre_tok_state.pop('''type''' ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**lowerCAmelCase__ )
        lowercase__ = add_prefix_space

    def lowercase__ ( self : List[Any], *lowerCamelCase : str, **lowerCamelCase : Optional[Any] ):
        '''Batch encode; pre-tokenized input requires add_prefix_space=True.'''
        lowercase__ = kwargs.get('''is_split_into_words''', lowerCAmelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__ )

    def lowercase__ ( self : Any, *lowerCamelCase : List[Any], **lowerCamelCase : Tuple ):
        '''Single encode; same add_prefix_space precondition as above.'''
        lowercase__ = kwargs.get('''is_split_into_words''', lowerCAmelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__ )

    def lowercase__ ( self : Tuple, lowerCamelCase : Any, lowerCamelCase : int = None ):
        '''Save the backend BPE model files; returns the written file paths.'''
        lowercase__ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__ )
        return tuple(lowerCAmelCase__ )

    def lowercase__ ( self : str, lowerCamelCase : Union[str, Any] ):
        '''Flatten a Conversation into input ids, appending EOS after each turn
        and truncating from the left to ``model_max_length``.'''
        lowercase__ = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] )
        if len(lowerCAmelCase__ ) > self.model_max_length:
            # Keep only the most recent tokens that fit the model context.
            lowercase__ = input_ids[-self.model_max_length :]
        return input_ids
| 207 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure for the CLAP model: maps submodule name -> public names.
# FIX(review): the original stored this dict and the torch-only name lists in
# throwaway variables, so ``_import_structure`` (referenced below) was never
# defined, the torch-backed symbols were never registered, and the
# ``_LazyModule`` was never installed in ``sys.modules``.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed classes are only importable when torch is installed.
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy
    # below resolves attributes on first access instead.
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules (and torch)
    # are only imported when one of their names is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetImgaImgPipeline,
    KandinskyVaaPriorEmbaEmbPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

# Force deterministic kernels and RNG behaviour so the pixel-level slice
# comparisons in the tests below are reproducible across runs/devices.
enable_full_determinism()
class a__ ( _lowerCAmelCase , unittest.TestCase ):
    """Fast CPU unit tests for ``KandinskyVaaControlnetImgaImgPipeline`` using
    tiny randomly-initialised sub-models (UNet, DDIM scheduler, VQ 'movq').

    NOTE(review): the file appears machine-obfuscated — locals are assigned to
    ``SCREAMING_SNAKE_CASE`` while later lines read the originally-named
    variables (``model``, ``unet``, ``inputs`` ...), and all properties share
    the name ``_lowercase`` so later definitions shadow earlier ones.  The
    docstrings below describe the intended behaviour; verify against the
    upstream diffusers test file.
    """

    # Pipeline class under test and the parameter names the tester mixin drives.
    UpperCAmelCase__ : Optional[int] =KandinskyVaaControlnetImgaImgPipeline
    UpperCAmelCase__ : Tuple =['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    UpperCAmelCase__ : List[Any] =['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    # Optional call arguments accepted by the pipeline's __call__.
    UpperCAmelCase__ : Optional[Any] =[
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    UpperCAmelCase__ : List[str] =False

    @property
    def _lowercase ( self : Union[str, Any] ) ->Dict:
        """Hidden size of the dummy text/image embedder."""
        return 3_2

    @property
    def _lowercase ( self : List[Any] ) ->List[Any]:
        """Time-embedding input dimension of the dummy UNet."""
        return 3_2

    @property
    def _lowercase ( self : List[Any] ) ->List[Any]:
        """Cross-attention dimension (same as the time input dim)."""
        return self.time_input_dim

    @property
    def _lowercase ( self : List[str] ) ->Any:
        """Time-embedding projection dimension (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def _lowercase ( self : Any ) ->Union[str, Any]:
        """Sequence length used by the tester mixin."""
        return 1_0_0

    @property
    def _lowercase ( self : Tuple ) ->Union[str, Any]:
        """Build a tiny ``UNetaDConditionModel`` with image+hint conditioning."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : str = {
            """in_channels""": 8,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(**lowerCAmelCase__ )
        return model

    @property
    def _lowercase ( self : List[str] ) ->List[Any]:
        """Constructor kwargs for a tiny ``VQModel`` (the 'movq' image codec)."""
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def _lowercase ( self : Dict ) ->Dict:
        """Instantiate the dummy VQModel with a fixed seed for determinism."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : str = VQModel(**self.dummy_movq_kwargs )
        return model

    def _lowercase ( self : Optional[int] ) ->Optional[int]:
        """Assemble the pipeline components: dummy UNet, DDIM scheduler, movq."""
        SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
        SCREAMING_SNAKE_CASE : int = self.dummy_movq
        SCREAMING_SNAKE_CASE : Union[str, Any] = {
            """num_train_timesteps""": 1_0_0_0,
            """beta_schedule""": """linear""",
            """beta_start""": 0.0_00_85,
            """beta_end""": 0.0_12,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def _lowercase ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple=0 ) ->Optional[Any]:
        """Build deterministic call kwargs: image embeds, a 256x256 init image,
        a control hint tensor, and a seeded generator."""
        SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            lowerCAmelCase__ )
        # create init_image
        SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        # create hint
        SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        # MPS does not support device-specific generators.
        if str(lowerCAmelCase__ ).startswith("""mps""" ):
            SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
        else:
            SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = {
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 6_4,
            """width""": 6_4,
            """num_inference_steps""": 1_0,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs

    def _lowercase ( self : Any ) ->Any:
        """End-to-end CPU smoke test: run the pipeline (dict and tuple return
        paths) and compare a 3x3 corner slice against frozen reference values."""
        SCREAMING_SNAKE_CASE : Dict = """cpu"""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : int = pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
        SCREAMING_SNAKE_CASE : Optional[Any] = output.images
        SCREAMING_SNAKE_CASE : Optional[int] = pipe(
            **self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
        SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        # Reference slice frozen from a known-good deterministic run.
        SCREAMING_SNAKE_CASE : int = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky 2.2 prior + controlnet-depth
    image-to-image run, compared against a stored reference image.

    NOTE(review): obfuscated local names (``SCREAMING_SNAKE_CASE`` assignments
    read back via the original names) — verify against upstream before relying
    on runtime behaviour.
    """

    def _lowercase ( self : List[str] ) ->Tuple:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase ( self : str ) ->Tuple:
        """Download fixtures, run prior then controlnet img2img at 512x512 in
        fp16, and assert the mean pixel difference against the reference."""
        SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
        SCREAMING_SNAKE_CASE : List[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        SCREAMING_SNAKE_CASE : List[Any] = init_image.resize((5_1_2, 5_1_2) )
        SCREAMING_SNAKE_CASE : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        # Depth hint as a (1, 3, H, W) float tensor scaled to [0, 1].
        SCREAMING_SNAKE_CASE : Any = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 2_5_5.0
        SCREAMING_SNAKE_CASE : Dict = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        SCREAMING_SNAKE_CASE : List[str] = """A robot, 4k photo"""
        SCREAMING_SNAKE_CASE : Optional[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE : List[Any] = pipeline.to(lowerCAmelCase__ )
        pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
        # CPU generator keeps the run reproducible regardless of GPU model.
        SCREAMING_SNAKE_CASE : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE : Tuple = pipe_prior(
            lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.85 , generator=lowerCAmelCase__ , negative_prompt="""""" , ).to_tuple()
        SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
            image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="""np""" , )
        SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 245 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer file.
__magic_name__: Dict = logging.get_logger(__name__)

# On-disk file names for the BPE vocabulary and merge rules.
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
# Download URLs for each pretrained BART checkpoint's vocab/merges files.
__magic_name__: Optional[Any] = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

# Maximum input length (positional-embedding size) for each checkpoint.
__magic_name__: List[Any] = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
    """Return the GPT-2/BART byte -> unicode-character map used by byte-level BPE.

    Every byte value (0-255) is mapped to a *printable* unicode character so
    BPE can operate on strings without producing whitespace/control
    characters: printable latin-1 bytes map to themselves, and the remaining
    bytes are shifted up into codepoints >= 256.

    FIX(review): the obfuscated original assigned every local to
    ``__magic_name__`` while reading ``bs``/``cs``/``n``/``_A``, which were
    never defined, so the function raised ``NameError``.  Restored the
    working implementation; the cached result is a ``dict[int, str]``.
    """
    # Bytes that map to themselves: printable ASCII plus two latin-1 runs.
    bs = (
        list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    # Give every remaining byte a fresh printable codepoint starting at 256.
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs, cs ) )
def UpperCamelCase ( _A ):
    """Return the set of adjacent symbol pairs in the word ``_A``.

    ``_A`` is a word represented as a sequence (normally a tuple) of symbols,
    where each symbol is a variable-length string; the result is the set of
    BPE merge candidates.

    FIX(review): the obfuscated original read ``word``/``pairs``/``prev_char``
    which were never bound (every assignment went to ``__magic_name__``), so
    the function raised ``NameError``.  Restored the working implementation
    and made the empty word return an empty set instead of raising
    ``IndexError``.
    """
    pairs = set()
    if len(_A ) < 2:
        # Fewer than two symbols -> no adjacent pairs (guards empty input).
        return pairs
    prev_char = _A[0]
    for char in _A[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class snake_case__ ( _lowerCAmelCase ):
    """Slow (pure-Python) byte-level BPE tokenizer for BART, GPT-2 style.

    NOTE(review): machine-obfuscated — ``__init__`` repeats the parameter name
    ``lowerCAmelCase__`` (a Python SyntaxError), every method is named
    ``__magic_name__`` (later defs shadow earlier ones), and locals are
    assigned to ``__magic_name__`` while read back via the original names
    (``pairs``, ``word``, ``vocab_file`` ...).  It also calls
    ``bytes_to_unicode``/``get_pairs``, names not defined under those
    identifiers in this file.  Docstrings describe the intended behaviour;
    confirm against the upstream transformers source.
    """

    lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
    lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
        """Load the vocab and merges files and build the BPE machinery."""
        # Wrap string special tokens as AddedToken so strip behaviour is explicit.
        __magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
        __magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
        __magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
        __magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
        __magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
        __magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
        super().__init__(
            errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
        with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
            __magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
        __magic_name__ : Any = {v: k for k, v in self.encoder.items()}
        __magic_name__ : Tuple = errors # how to handle errors in decoding
        __magic_name__ : Tuple = bytes_to_unicode()
        __magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
        # First and last lines of merges.txt are a header/empty line.
        with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
            __magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
        __magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
        __magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        __magic_name__ : str = {}
        __magic_name__ : int = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    @property
    def __magic_name__ ( self ) -> Optional[Any]:
        """Vocabulary size (base encoder only)."""
        return len(self.encoder )

    def __magic_name__ ( self ) -> Optional[int]:
        """Return the full vocab including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
        """Apply BPE merges to one token, with memoisation in ``self.cache``."""
        if token in self.cache:
            return self.cache[token]
        __magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
        __magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
        if not pairs:
            return token
        while True:
            # Greedily merge the lowest-ranked (most frequent) pair.
            __magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __magic_name__ ,__magic_name__ : List[str] = bigram
            __magic_name__ : Any = []
            __magic_name__ : Any = 0
            while i < len(lowerCAmelCase__ ):
                try:
                    __magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __magic_name__ : Optional[Any] = j
                if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __magic_name__ : str = tuple(lowerCAmelCase__ )
            __magic_name__ : Optional[int] = new_word
            if len(lowerCAmelCase__ ) == 1:
                break
            else:
                __magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
        __magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
        __magic_name__ : str = word
        return word

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
        """Split text with the GPT-2 regex, byte-encode, and apply BPE."""
        __magic_name__ : str = []
        for token in re.findall(self.pat , lowerCAmelCase__ ):
            __magic_name__ : List[Any] = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
        return bpe_tokens

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
        """Token string -> vocab id (unk id when missing)."""
        return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
        """Vocab id -> token string."""
        return self.decoder.get(lowerCAmelCase__ )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
        """Join tokens and invert the byte->unicode mapping back to text."""
        __magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
        __magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __magic_name__ : Tuple = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        __magic_name__ : List[str] = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
        __magic_name__ : Optional[Any] = 0
        with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            # Merges are written in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    __magic_name__ : Optional[int] = token_index
                writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
                index += 1
        return vocab_file, merge_file

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        """Build ``<s> A </s>`` or ``<s> A </s></s> B </s>`` with special tokens."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ : List[str] = [self.cls_token_id]
        __magic_name__ : Any = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
        return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        """Token-type ids: BART does not use them, so everything is 0."""
        __magic_name__ : Dict = [self.sep_token_id]
        __magic_name__ : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
        """Optionally prepend a space so leading words BPE-merge like mid-sentence ones."""
        __magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
            __magic_name__ : List[Any] = """ """ + text
        return (text, kwargs)
| 342 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Unit tests for ``BarkProcessor``: save/load round-trips, speaker-preset
    handling, and tokenizer parity.

    NOTE(review): machine-obfuscated — every method is named ``__a`` (later
    definitions shadow earlier ones) and locals are assigned to ``a`` while
    later lines read the originally-named variables (``processor``,
    ``tokenizer`` ...).  Confirm against the upstream transformers test file.
    """

    def __a ( self ) -> str:
        """Set up checkpoint name, temp dir, and shared fixture strings."""
        a : Tuple = """ylacombe/bark-small"""
        a : List[str] = tempfile.mkdtemp()
        a : Optional[Any] = """en_speaker_1"""
        a : Union[str, Any] = """This is a test string"""
        a : Optional[int] = """speaker_embeddings_path.json"""
        a : Any = """speaker_embeddings"""

    def __a ( self , **lowerCAmelCase__ ) -> List[Any]:
        """Load the tokenizer for the test checkpoint."""
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )

    def __a ( self ) -> Optional[Any]:
        """Remove the temporary directory created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def __a ( self ) -> Tuple:
        """save_pretrained / from_pretrained round-trip preserves the vocab."""
        a : Optional[Any] = self.get_tokenizer()
        a : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
        processor.save_pretrained(self.tmpdirname )
        a : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def __a ( self ) -> Optional[int]:
        """Round-trip with speaker embeddings plus tokenizer kwargs overrides."""
        a : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        a : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        a : str = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def __a ( self ) -> Any:
        """Voice presets can be given as an in-memory dict, an npz file path,
        or a preset name resolved from the hub."""
        a : List[str] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        a : Union[str, Any] = 35
        a : List[Any] = 2
        a : Dict = 8
        a : Tuple = {
            """semantic_prompt""": np.ones(lowerCAmelCase__ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        a : Optional[int] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
        a : Union[str, Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        a : Dict = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
        a : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
        a : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        a : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )

    def __a ( self ) -> Optional[Any]:
        """Processor tokenization matches calling the tokenizer directly."""
        a : str = self.get_tokenizer()
        a : Dict = BarkProcessor(tokenizer=lowerCAmelCase__ )
        a : Optional[Any] = processor(text=self.input_string )
        a : List[Any] = tokenizer(
            self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 105 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 1_28] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> str:
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : str = embed_dim
__magic_name__ : int = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : List[str] = num_heads
__magic_name__ : str = window_size
__magic_name__ : Optional[Any] = mlp_ratio
__magic_name__ : Dict = qkv_bias
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : int = use_absolute_embeddings
__magic_name__ : Dict = patch_norm
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[int] = is_training
__magic_name__ : Optional[Any] = scope
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = encoder_stride
__magic_name__ : List[Any] = out_features
__magic_name__ : Union[str, Any] = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
    """Check FocalNetForMaskedImageModeling reconstruction shapes, incl. 1-channel input."""
    # NOTE(review): duplicate parameter names and desynchronized
    # ``__magic_name__`` assignment targets -- obfuscation damage; verify.
    __magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
    model.to(lowerCAmelCase__ )
    model.eval()
    __magic_name__ : str = model(lowerCAmelCase__ )
    self.parent.assertEqual(
        result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
    # test greyscale images
    __magic_name__ : Optional[int] = 1
    __magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
    model.to(lowerCAmelCase__ )
    model.eval()
    __magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
    __magic_name__ : List[Any] = model(lowerCAmelCase__ )
    self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
    """Check FocalNetForImageClassification logits shape, incl. 1-channel input."""
    # NOTE(review): duplicate parameter names and desynchronized
    # ``__magic_name__`` assignment targets -- obfuscation damage; verify.
    __magic_name__ : int = self.type_sequence_label_size
    __magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
    model.to(lowerCAmelCase__ )
    model.eval()
    __magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # test greyscale images
    __magic_name__ : Optional[int] = 1
    __magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
    model.to(lowerCAmelCase__ )
    model.eval()
    __magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
    __magic_name__ : Dict = model(lowerCAmelCase__ )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
    """Return (config, inputs_dict) consumed by the common ModelTesterMixin tests."""
    # NOTE(review): the 3-target annotated unpacking below is a SyntaxError and
    # the names ``config_and_inputs``/``pixel_values``/``config``/``inputs_dict``
    # are never bound -- obfuscation damage; verify against the original file.
    __magic_name__ : int = self.prepare_config_and_inputs()
    __magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
    __magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
    return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model tests for FocalNet (model / classification / MIM / backbone heads).

    NOTE(review): throughout this class the obfuscation left duplicate
    ``lowerCAmelCase__`` parameters and ``__magic_name__`` assignment targets
    that never match the names read afterwards (``model``, ``config``,
    ``inputs_dict``, ...) -- restore the original identifiers before running.
    """
    # models under test (backbone last -- several loops below slice it off with [:-1])
    lowercase__ : str = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowercase__ : Any = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # mixin feature switches (resizing / head-masking / pruning / attentions off)
    lowercase__ : Dict = False
    lowercase__ : Dict = False
    lowercase__ : int = False
    lowercase__ : Tuple = False
    lowercase__ : Optional[Any] = False
    # setUp: build the model tester and the config tester
    def __magic_name__ ( self ) -> Dict:
        __magic_name__ : Optional[Any] = FocalNetModelTester(self )
        __magic_name__ : int = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ )
    # run the standard configuration round-trip checks
    def __magic_name__ ( self ) -> List[Any]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # placeholder for create_and_test_config_common_properties (intentionally empty)
    def __magic_name__ ( self ) -> List[str]:
        return
    def __magic_name__ ( self ) -> Tuple:
        __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )
    def __magic_name__ ( self ) -> Tuple:
        __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
    def __magic_name__ ( self ) -> List[str]:
        __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
    def __magic_name__ ( self ) -> List[Any]:
        __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def __magic_name__ ( self ) -> List[str]:
        pass
    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def __magic_name__ ( self ) -> List[Any]:
        pass
    # input embeddings exist, output embeddings do not (backbone excluded via [:-1])
    def __magic_name__ ( self ) -> List[Any]:
        __magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __magic_name__ : Dict = model_class(lowerCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __magic_name__ : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
    # forward() signature must start with pixel_values
    def __magic_name__ ( self ) -> Tuple:
        __magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __magic_name__ : str = model_class(lowerCAmelCase__ )
            __magic_name__ : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ : Tuple = [*signature.parameters.keys()]
            __magic_name__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
    # shared helper: assert hidden_states / reshaped_hidden_states shapes
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
        __magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        with torch.no_grad():
            __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
        __magic_name__ : Optional[Any] = outputs.hidden_states
        __magic_name__ : Union[str, Any] = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
        # FocalNet has a different seq_length
        __magic_name__ : List[str] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __magic_name__ : str = outputs.reshaped_hidden_states
        self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
        __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
        # flatten (B, C, H, W) back to (B, H*W, C) and compare to hidden_states layout
        __magic_name__ : Union[str, Any] = (
            reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    # hidden-states output with the default image size
    def __magic_name__ ( self ) -> str:
        __magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : Optional[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            __magic_name__ : List[Any] = True
            self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ : Optional[Any] = True
            self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    # hidden-states output with an image size padded up to a patch multiple
    def __magic_name__ ( self ) -> str:
        __magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : Optional[Any] = 3
        __magic_name__ : Union[str, Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __magic_name__ : Dict = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            __magic_name__ : Optional[int] = True
            self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ : str = True
            self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
    # smoke-test loading a published checkpoint
    @slow
    def __magic_name__ ( self ) -> Union[str, Any]:
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
    # with a zero-init config, non-embedding params must be exactly 0 or 1
    def __magic_name__ ( self ) -> Optional[int]:
        __magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
        for model_class in self.all_model_classes:
            __magic_name__ : Any = model_class(config=lowerCAmelCase__ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
    """Integration test: run the microsoft/focalnet-tiny checkpoint on a real image.

    NOTE(review): ``lowerCAmelCase__`` used below was presumably ``torch_device``
    before obfuscation, and ``__magic_name__`` targets do not match the names
    read afterwards (``model``, ``image_processor``, ...) -- verify.
    """
    @cached_property
    def __magic_name__ ( self ) -> Optional[int]:
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
    @slow
    def __magic_name__ ( self ) -> Optional[Any]:
        __magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
        __magic_name__ : Optional[Any] = self.default_image_processor
        __magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        __magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
        # forward pass
        with torch.no_grad():
            __magic_name__ : List[Any] = model(**lowerCAmelCase__ )
        # verify the logits
        __magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
        __magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
    """Backbone-mixin tests for FocalNetBackbone."""
    lowercase__ : Any = (FocalNetBackbone,) if is_torch_available() else ()
    lowercase__ : Optional[int] = FocalNetConfig
    lowercase__ : Dict = False
    def __magic_name__ ( self ) -> int:
        # setUp -- NOTE(review): looks truncated; the tester is created but nothing
        # follows (and ``__magic_name__`` never matches a later read) -- confirm
        # against the original file.
        __magic_name__ : Dict = FocalNetModelTester(self )
'''simple docstring'''
from math import isqrt
def _UpperCAmelCase ( _lowerCamelCase : int ) -> List[str]:
return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] = 10**6 ) -> Any:
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[int] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
    # Fix: the original printed ``solution()``, a name that does not exist in
    # this module; the solver defined above is ``_UpperCAmelCase``.
    print(F'{_UpperCAmelCase() = }')
| 309 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
    """Test harness that builds small TimesformerConfig/model instances and
    checks their output shapes.

    NOTE(review): the ``__init__`` signature repeats ``lowerCAmelCase__`` for
    every parameter (duplicate argument names are a SyntaxError) and every
    assignment binds ``__magic_name__`` while later lines read the original
    names -- machine obfuscation damage; restore identifiers before running.
    """
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]:
        __magic_name__ : int = parent
        __magic_name__ : Tuple = batch_size
        __magic_name__ : int = image_size
        __magic_name__ : str = num_channels
        __magic_name__ : Dict = patch_size
        __magic_name__ : Tuple = num_frames
        __magic_name__ : List[Any] = is_training
        __magic_name__ : List[Any] = use_labels
        __magic_name__ : Dict = hidden_size
        __magic_name__ : List[Any] = num_hidden_layers
        __magic_name__ : str = num_attention_heads
        __magic_name__ : List[Any] = intermediate_size
        __magic_name__ : Dict = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Union[str, Any] = attention_probs_dropout_prob
        __magic_name__ : Tuple = attention_type
        __magic_name__ : List[str] = initializer_range
        __magic_name__ : Optional[Any] = scope
        __magic_name__ : Tuple = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        __magic_name__ : str = (image_size // patch_size) ** 2
        __magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1
    # build a random pixel_values video batch (and labels when use_labels)
    def __magic_name__ ( self ) -> Dict:
        __magic_name__ : Optional[Any] = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        __magic_name__ : str = None
        if self.use_labels:
            __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
        __magic_name__ : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    # build a small TimesformerConfig mirroring the tester attributes
    def __magic_name__ ( self ) -> str:
        __magic_name__ : Dict = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        __magic_name__ : Optional[Any] = self.num_labels
        return config
    # check TimesformerModel's last_hidden_state shape
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
        __magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        __magic_name__ : Optional[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # check TimesformerForVideoClassification's logits shape
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
        __magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        __magic_name__ : List[Any] = model(lowerCAmelCase__ )
        # verify the logits shape
        __magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ )
    # return (config, inputs_dict) for the common mixin tests
    def __magic_name__ ( self ) -> Any:
        __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
        __magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs
        __magic_name__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model tests for TimeSformer (feature extraction + video classification).

    NOTE(review): as elsewhere in this file, duplicate ``lowerCAmelCase__``
    parameters (SyntaxError) and ``__magic_name__`` assignment targets that
    never match later reads are obfuscation damage -- restore before running.
    """
    lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    lowercase__ : Union[str, Any] = (
        {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    # mixin feature switches
    lowercase__ : int = False
    lowercase__ : str = False
    lowercase__ : Tuple = False
    lowercase__ : Any = False
    # setUp: build model tester and config tester
    def __magic_name__ ( self ) -> List[Any]:
        __magic_name__ : List[Any] = TimesformerModelTester(self )
        __magic_name__ : List[str] = ConfigTester(
            self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
    # add dummy labels when the class expects them (video classification)
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
        __magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ )
        if return_labels:
            if model_class in get_values(lowerCAmelCase__ ):
                __magic_name__ : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
        return inputs_dict
    def __magic_name__ ( self ) -> List[str]:
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def __magic_name__ ( self ) -> str:
        pass
    # input embeddings exist, output embeddings do not
    def __magic_name__ ( self ) -> Optional[int]:
        __magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __magic_name__ : Tuple = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
    # forward() signature must start with pixel_values
    def __magic_name__ ( self ) -> Optional[Any]:
        __magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ : Dict = model_class(lowerCAmelCase__ )
            __magic_name__ : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ : Optional[int] = [*signature.parameters.keys()]
            __magic_name__ : Any = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
    def __magic_name__ ( self ) -> List[Any]:
        __magic_name__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )
    def __magic_name__ ( self ) -> Union[str, Any]:
        __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ )
    # smoke-test loading a published checkpoint
    @slow
    def __magic_name__ ( self ) -> Optional[int]:
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
    # attention-output shapes, via kwargs and via config, and output ordering
    def __magic_name__ ( self ) -> List[Any]:
        if not self.has_attentions:
            pass
        else:
            __magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
            __magic_name__ : Optional[int] = True
            for model_class in self.all_model_classes:
                __magic_name__ : Tuple = self.model_tester.seq_length
                __magic_name__ : int = self.model_tester.num_frames
                __magic_name__ : Any = True
                __magic_name__ : Tuple = False
                __magic_name__ : Optional[int] = True
                __magic_name__ : str = model_class(lowerCAmelCase__ )
                model.to(lowerCAmelCase__ )
                model.eval()
                with torch.no_grad():
                    __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
                __magic_name__ : List[str] = outputs.attentions
                self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                __magic_name__ : Optional[Any] = True
                __magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ )
                model.to(lowerCAmelCase__ )
                model.eval()
                with torch.no_grad():
                    __magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
                __magic_name__ : int = outputs.attentions
                self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                __magic_name__ : Union[str, Any] = len(lowerCAmelCase__ )
                # Check attention is always last and order is fine
                __magic_name__ : str = True
                __magic_name__ : Optional[Any] = True
                __magic_name__ : int = model_class(lowerCAmelCase__ )
                model.to(lowerCAmelCase__ )
                model.eval()
                with torch.no_grad():
                    __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
                self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
                __magic_name__ : Union[str, Any] = outputs.attentions
                self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    # hidden-states output via kwargs and via config
    def __magic_name__ ( self ) -> Any:
        def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
            __magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                __magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            __magic_name__ : Optional[Any] = outputs.hidden_states
            __magic_name__ : str = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
            __magic_name__ : str = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        __magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ : Optional[Any] = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ : Union[str, Any] = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
    """Download the eating-spaghetti fixture clip from the Hub and return it
    as a list of frames (one numpy array per frame).

    Fix: the original loaded ``np.load(_A)`` where ``_A`` was undefined --
    every call raised NameError; the downloaded file path is what must be
    loaded.
    """
    file_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" )
    video = np.load(file_path )
    return list(video )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
    """Integration test: run the Kinetics-400 TimeSformer checkpoint on a real clip.

    NOTE(review): ``lowerCAmelCase__`` below was presumably ``torch_device``
    before obfuscation, and ``__magic_name__`` targets never match later reads
    (``model``, ``inputs``, ``outputs``, ...) -- verify.
    """
    @cached_property
    def __magic_name__ ( self ) -> Optional[Any]:
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )
    @slow
    def __magic_name__ ( self ) -> List[Any]:
        __magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            lowerCAmelCase__ )
        __magic_name__ : str = self.default_image_processor
        __magic_name__ : Any = prepare_video()
        __magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ )
        # forward pass
        with torch.no_grad():
            __magic_name__ : int = model(**lowerCAmelCase__ )
        # verify the logits
        __magic_name__ : Optional[int] = torch.Size((1, 4_00) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
        __magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for the ``video-classification`` task.

    NOTE(review): the first method repeats ``_UpperCAmelCase`` as a parameter
    name (duplicate argument names are a SyntaxError) and ``UpperCAmelCase__``
    assignment targets never match the names read afterwards -- obfuscation
    damage; restore identifiers before running.
    """
    lowerCAmelCase_ : List[Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    # build a pipeline and a pair of example inputs (local file + remote URL)
    def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ):
        """Return (video_classifier, examples) for the generic pipeline runner."""
        UpperCAmelCase__ = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        UpperCAmelCase__ = VideoClassificationPipeline(model=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , top_k=2 )
        UpperCAmelCase__ = [
            example_video_filepath,
            """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
        ]
        return video_classifier, examples
    # each example must yield top_k=2 (score, label) dicts
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ):
        """Run the classifier over each example and check the output structure."""
        for example in examples:
            UpperCAmelCase__ = video_classifier(lowerCAmelCase__ )
            self.assertEqual(
                lowerCAmelCase__ , [
                    {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
                    {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
                ] , )
    # end-to-end PyTorch test with a tiny random VideoMAE checkpoint
    @require_torch
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Check scores for single and batched video inputs."""
        UpperCAmelCase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
        UpperCAmelCase__ = VideoMAEFeatureExtractor(
            size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
        UpperCAmelCase__ = pipeline(
            """video-classification""" , model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , frame_sampling_rate=4 )
        UpperCAmelCase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        UpperCAmelCase__ = video_classifier(lowerCAmelCase__ , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
        UpperCAmelCase__ = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
                [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
                [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
            ] , )
    # TensorFlow variant not implemented
    @require_tf
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        """Placeholder: no TF video-classification pipeline test yet."""
        pass
| 346 |
def UpperCamelCase ( _A ):
    """Print the number of vertices on the longest path in the DAG ``_A``.

    ``_A`` maps each vertex ``0..n-1`` to the list of its successors.  The
    graph is processed in Kahn topological order; ``long_dist[v]`` holds the
    vertex count of the longest path ending at ``v`` (so a single vertex
    counts as 1).

    Fixes in this revision:
    - the original assignments all bound ``__magic_name__`` while the body
      read ``indegree``/``queue``/``long_dist``/``vertex`` -- every call
      raised NameError; names are now bound consistently;
    - ``list.pop(0)`` (O(n) per pop) is replaced by ``deque.popleft`` (O(1)).
    """
    from collections import deque  # local import: O(1) popleft

    indegree = [0] * len(_A)
    long_dist = [1] * len(_A)
    for successors in _A.values():
        for node in successors:
            indegree[node] += 1
    # seed the queue with all source vertices (indegree zero)
    queue = deque(v for v in range(len(_A)) if indegree[v] == 0)
    while queue:
        vertex = queue.popleft()
        for nxt in _A[vertex]:
            indegree[nxt] -= 1
            if long_dist[vertex] + 1 > long_dist[nxt]:
                long_dist[nxt] = long_dist[vertex] + 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    print(max(long_dist))
# Adjacency list of the example DAG (vertex -> list of successors).
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# Fix: the original bound the literal to ``__magic_name__`` and then called the
# undefined name ``longest_distance``; the solver defined above is ``UpperCamelCase``.
UpperCamelCase(graph)
| 342 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Megatron-BERT sub-package: the configuration is
# always importable, the modelling classes only when torch is installed.
# NOTE(review): the annotations ``Optional[Any]``/``Union[str, Any]``/``List[str]``
# are evaluated at module level but ``typing`` is never imported here -- this
# would raise NameError on import; presumably obfuscation damage -- confirm.
_UpperCAmelCase : Optional[Any] = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
    # register the modelling module only when torch is available
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Union[str, Any] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # static type checkers see the real imports ...
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    # ... while at runtime the module is replaced by a lazy proxy
    import sys

    _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 285 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
__magic_name__ : Tuple = """ylacombe/bark-small"""
__magic_name__ : List[str] = tempfile.mkdtemp()
__magic_name__ : Optional[Any] = """en_speaker_1"""
__magic_name__ : Union[str, Any] = """This is a test string"""
__magic_name__ : Optional[int] = """speaker_embeddings_path.json"""
__magic_name__ : Any = """speaker_embeddings"""
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = self.get_tokenizer()
__magic_name__ : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__magic_name__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__magic_name__ : Union[str, Any] = 35
__magic_name__ : List[Any] = 2
__magic_name__ : Dict = 8
__magic_name__ : Tuple = {
"""semantic_prompt""": np.ones(lowerCAmelCase__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__magic_name__ : Optional[int] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__magic_name__ : Dict = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__magic_name__ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__(self) -> Optional[Any]:
    """Check that BarkProcessor tokenizes text exactly like the wrapped
    tokenizer (padded to max_length=256, no special tokens, no token-type ids).

    NOTE(review): the boolean tokenizer kwargs below were garbled in the
    original (`lowerCAmelCase__`); the values used here follow the upstream
    Bark processor test — confirm against the reference test suite.
    """
    tokenizer = self.get_tokenizer()
    processor = BarkProcessor(tokenizer=tokenizer)

    encoded_processor = processor(text=self.input_string)

    encoded_tok = tokenizer(
        self.input_string,
        padding="max_length",
        max_length=2_56,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
    )

    # The processor output has a leading batch dimension; squeeze it away
    # before comparing against the plain tokenizer output.
    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 342 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the (height, width) an image should be resized to.

    The original signature repeated one parameter name four times (a Python
    SyntaxError) and the inner helper's parameters did not match its body;
    the names here are restored from how the values are used below.

    Args:
        input_image: image whose current size is queried via ``get_image_size``.
        output_size: target size, either an int (square) or a (height, width) pair.
        keep_aspect_ratio: if True, scale both sides by the single factor
            closest to 1 so the aspect ratio is preserved.
        multiple: both output dimensions are constrained to a multiple of this.

    Returns:
        ``(new_height, new_width)`` tuple of ints.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple; fall back to floor/ceil when the
        # rounded value violates max_val/min_val.
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


# The image processor below calls this helper under its conventional name.
get_resize_output_image_size = lowerCAmelCase__
class A__(_lowerCAmelCase):
    """DPT image processor.

    Supports resizing (optionally preserving aspect ratio and snapping both
    dimensions to a multiple), rescaling pixel values, normalizing with a
    mean/std, and post-processing semantic-segmentation logits.

    NOTE(review): the original class declared every parameter as ``a``
    (duplicate argument names — a SyntaxError) and defined four methods all
    named ``_lowerCamelCase`` even though ``preprocess`` calls ``self.resize``,
    ``self.rescale`` and ``self.normalize``; names are restored accordingly.
    """

    lowercase = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        """Store default preprocessing settings; size defaults to 384x384."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        """Resize ``image`` to ``size`` (a {'height', 'width'} dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        output_size = get_resize_output_image_size(
            image,
            output_size=(size['height'], size['width']),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch: resize -> rescale -> normalize,
        returning a ``BatchFeature`` with key ``pixel_values``.

        Any argument left as None falls back to the value set in __init__.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        # Parenthesized deliberately: the original `a and b or c` form raised
        # even when do_resize was False but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model ``outputs.logits`` into per-image segmentation maps,
        optionally bilinearly resized to ``target_sizes``."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits'
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__(unittest.TestCase):
    """Holds the configuration used by the DPT image-processor tests and builds
    the kwargs dict the processor is instantiated from.

    The original ``__init__`` bound every value to a throwaway local instead of
    an attribute, so ``prepare_image_processor_dict`` raised AttributeError;
    attribute assignments are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        # None defaults instead of mutable list defaults; behavior unchanged.
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a DPTImageProcessor.

        Named to match the call site in the test class
        (``self.image_processor_tester.prepare_image_processor_dict()``).
        """
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class snake_case__(_lowerCAmelCase, unittest.TestCase):
    """Tests for DPTImageProcessor: attribute presence, dict round-trips, and
    preprocessing of PIL, numpy and torch image batches.

    NOTE(review): the original gave every method the same obfuscated name (so
    only the last survived and none ran under unittest) and bound locals to
    ``__magic_name__``; names are restored from how the values are read.
    The helper class name ``DPTImageProcessingTester`` matches the setUp call
    in the original source.
    """

    # The class under test; None when vision deps are missing so the
    # @require_vision skip applies cleanly.
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 342 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase__ =None
# NOTE(review): in the original all five module constants were bound to the
# same name `lowercase__`, each clobbering the previous, while the tokenizer
# class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and `logger`. Names restored to match
# those call sites.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

# Maximum input length (positional embeddings) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

# Sentencepiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class UpperCamelCase__(_lowerCAmelCase):
    """Fast RemBERT tokenizer backed by the HuggingFace *tokenizers* library.

    NOTE(review): the original repeated one parameter name per signature (a
    SyntaxError), gave all four methods the same name (so they shadowed each
    other), and bound locals instead of the ``self.*`` attributes read by
    ``save_vocabulary``. Names are restored to the PreTrainedTokenizerFast
    override convention.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocab is only possible when a sentencepiece model
        # file was provided.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]`` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 216 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from plain-English language names to their NLLB-200 (FLORES-200)
# language codes. Renamed from the garbled `__magic_name__`: the translation
# tool class below references this table as LANGUAGE_CODES.
# NOTE(review): the "Minangkabau Arabic " key carries a trailing space — it is
# preserved here because dict keys are runtime-visible; confirm upstream.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class snake_case__(_lowerCAmelCase):
    """Tool that translates text between two NLLB-200 languages given by their
    plain-English names.

    NOTE(review): the original bound all eight class attributes to the single
    name ``lowercase__`` (only the last survived) although ``encode`` reads
    ``self.lang_to_code``, and its method signatures repeated one parameter
    name (a SyntaxError). Names follow the PipelineTool encode/forward/decode
    contract.
    """

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        """Validate both language names and tokenize ``text`` for translation."""
        if src_lang not in self.lang_to_code:
            raise ValueError(F'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="""pt""", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 342 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Auto-generated placeholder ("dummy object") classes: each raises a
# missing-backend error on any use unless torch + transformers + onnx are all
# installed.
# NOTE(review): all six classes share the obfuscated name `a`, so each class
# statement rebinds the name and only the last definition survives at module
# level; likewise both classmethods in each class share the name
# `UpperCamelCase_`, so the first is shadowed. In the real generated file each
# class and method has a distinct name — confirm against the original source.
class a ( metaclass=_lowerCAmelCase ):
    # Backends this placeholder requires before it can be used.
    UpperCAmelCase_ : List[Any] =['''torch''', '''transformers''', '''onnx''']
    def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        # Raises ImportError-style guidance when the backends are missing.
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class a ( metaclass=_lowerCAmelCase ):
    UpperCAmelCase_ : Any =['''torch''', '''transformers''', '''onnx''']
    def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class a ( metaclass=_lowerCAmelCase ):
    UpperCAmelCase_ : str =['''torch''', '''transformers''', '''onnx''']
    def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class a ( metaclass=_lowerCAmelCase ):
    UpperCAmelCase_ : Any =['''torch''', '''transformers''', '''onnx''']
    def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class a ( metaclass=_lowerCAmelCase ):
    UpperCAmelCase_ : Optional[int] =['''torch''', '''transformers''', '''onnx''']
    def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class a ( metaclass=_lowerCAmelCase ):
    UpperCAmelCase_ : Tuple =['''torch''', '''transformers''', '''onnx''']
    def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 220 |
import math
class snake_case__:
    """Dense directed graph with all-pairs shortest paths via Floyd–Warshall.

    The original bound matrices and updates to throwaway locals while other
    methods read ``self.n`` / ``self.dp``, and its method names were garbled;
    method names are restored to match the ``__main__`` script below
    (``add_edge`` / ``floyd_warshall`` / ``show_min``).
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight; math.inf means "no direct edge"
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        # NOTE(review): dp[i][i] is never initialized to 0, so a node's
        # distance to itself stays inf unless it lies on a cycle — confirm
        # this matches the intended semantics before relying on show_min(i, i).
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through every intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest known distance from u to v."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Demo: build a 5-node directed graph and run all-pairs shortest paths.
    # NOTE(review): the names here were garbled — the constructor is called
    # as `Graph(5)` and the instance is used as `graph`, yet the class above
    # is (obfuscated) `snake_case__` and the result is bound to
    # `__magic_name__`. As written this script raises NameError; confirm the
    # intended names against the original source before running.
    __magic_name__: Dict = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    # Compute shortest paths, then query two example pairs (results are
    # computed but not printed).
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 342 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=0.999 , UpperCamelCase_="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
__SCREAMING_SNAKE_CASE = []
for i in range(_A ):
__SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) )
return torch.tensor(_A , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE_ ( _lowerCAmelCase , _lowerCAmelCase ):
    """simple docstring"""

    # NOTE(review): obfuscated KDPM2-style discrete scheduler (second-order
    # k-diffusion sampler). Most method signatures below repeat the parameter
    # name `lowerCAmelCase__`, which is a SyntaxError as written, and bodies
    # reference the pre-obfuscation names (trained_betas, sample, ...). Code is
    # kept byte-identical; only comments were added.
    __lowercase : List[str] = [e.name for e in KarrasDiffusionSchedulers]
    __lowercase : Tuple = 2  # solver order (two model evaluations per step)

    @register_to_config
    def __init__( self , lowerCAmelCase__ = 1_0_0_0 , lowerCAmelCase__ = 0.0_00_85 , lowerCAmelCase__ = 0.0_12 , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = None , lowerCAmelCase__ = "epsilon" , lowerCAmelCase__ = "linspace" , lowerCAmelCase__ = 0 , ):
        # Build the beta schedule (trained / linear / scaled_linear / cosine),
        # derive alphas and their cumulative product, then set default timesteps.
        if trained_betas is not None:
            __SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa)
        elif beta_schedule == "linear":
            __SCREAMING_SNAKE_CASE = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __SCREAMING_SNAKE_CASE = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            __SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCAmelCase__)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
        __SCREAMING_SNAKE_CASE = 1.0 - self.betas
        __SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0)
        # set all values
        self.set_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)

    # Map a timestep value to its index in the current schedule.
    def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None):
        if schedule_timesteps is None:
            __SCREAMING_SNAKE_CASE = self.timesteps
        __SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase__) > 1 else 0
        else:
            __SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__) else timestep
            __SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def snake_case_ ( self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    # Scale model input by 1/sqrt(sigma^2 + 1) to match the k-diffusion algorithm.
    def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , ):
        __SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCAmelCase__)
        if self.state_in_first_order:
            __SCREAMING_SNAKE_CASE = self.sigmas[step_index]
        else:
            __SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
        __SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    # Precompute timesteps, sigmas, and the interpolated (midpoint) sigmas used
    # by the second-order step.
    def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ):
        __SCREAMING_SNAKE_CASE = num_inference_steps
        __SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            __SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase__ , dtype=lowerCAmelCase__)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            __SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            __SCREAMING_SNAKE_CASE = (np.arange(0 , lowerCAmelCase__) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase__)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            __SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            __SCREAMING_SNAKE_CASE = (np.arange(lowerCAmelCase__ , 0 , -step_ratio)).round().copy().astype(lowerCAmelCase__)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.")
        __SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        __SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(lowerCAmelCase__)).to(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = np.interp(lowerCAmelCase__ , np.arange(0 , len(lowerCAmelCase__)) , lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
        __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCAmelCase__).to(device=lowerCAmelCase__)
        # interpolate sigmas
        __SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
        __SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        __SCREAMING_SNAKE_CASE = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(lowerCAmelCase__).startswith("""mps"""):
            # mps does not support float64
            __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCAmelCase__).to(lowerCAmelCase__ , dtype=torch.floataa)
        else:
            __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCAmelCase__).to(lowerCAmelCase__)
        # interpolate timesteps
        __SCREAMING_SNAKE_CASE = self.sigma_to_t(lowerCAmelCase__).to(lowerCAmelCase__ , dtype=timesteps.dtype)
        __SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
        __SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps])
        __SCREAMING_SNAKE_CASE = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        __SCREAMING_SNAKE_CASE = defaultdict(lowerCAmelCase__)

    # Invert the sigma schedule: map a sigma back to a (fractional) timestep by
    # piecewise-linear interpolation in log-sigma space.
    def snake_case_ ( self , lowerCAmelCase__):
        # get log sigma
        __SCREAMING_SNAKE_CASE = sigma.log()
        # get distribution
        __SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        __SCREAMING_SNAKE_CASE = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        __SCREAMING_SNAKE_CASE = low_idx + 1
        __SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx]
        __SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx]
        # interpolate sigmas
        __SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high)
        __SCREAMING_SNAKE_CASE = w.clamp(0 , 1)
        # transform interpolation to time range
        __SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx
        __SCREAMING_SNAKE_CASE = t.view(sigma.shape)
        return t

    @property
    def snake_case_ ( self):
        # True while the sampler is awaiting its first (Euler) half-step.
        return self.sample is None

    # One sampler step: first-order (Euler) half-step, then the DPM-Solver-2
    # second-order correction on the next call.
    def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ):
        __SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCAmelCase__)
        # advance index counter by 1
        __SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            __SCREAMING_SNAKE_CASE = self.sigmas[step_index]
            __SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1]
            __SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            __SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1]
            __SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
            __SCREAMING_SNAKE_CASE = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        __SCREAMING_SNAKE_CASE = 0
        __SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            __SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
            __SCREAMING_SNAKE_CASE = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            __SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
            __SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            __SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            __SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat
            # store for 2nd order step
            __SCREAMING_SNAKE_CASE = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            __SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            __SCREAMING_SNAKE_CASE = sigma_next - sigma_hat
            __SCREAMING_SNAKE_CASE = self.sample
            __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase__)

    # Forward-diffuse clean samples to the noise level of the given timesteps.
    def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        __SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase__):
            # mps does not support float64
            __SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device , dtype=torch.floataa)
            __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device , dtype=torch.floataa)
        else:
            __SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device)
            __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device)
        __SCREAMING_SNAKE_CASE = [self.index_for_timestep(lowerCAmelCase__ , lowerCAmelCase__) for t in timesteps]
        __SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            __SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1)
        __SCREAMING_SNAKE_CASE = original_samples + noise * sigma
        return noisy_samples

    def __len__( self):
        # A scheduler's length is its training-timestep count.
        return self.config.num_train_timesteps
| 100 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case__ :
    # NOTE(review): obfuscated mathematical Vector class. Several methods below
    # repeat the parameter name `lowerCAmelCase__` (a SyntaxError as written),
    # all non-dunder methods share the name `__magic_name__`, and assignment
    # targets were replaced by throwaway locals where attribute/index stores
    # were intended. Code kept byte-identical; only comments added.

    def __init__( self , lowerCAmelCase__ = None ) -> None:
        # Intent: store `components` (defaulting to []) into self.__components.
        if components is None:
            __magic_name__ : Any = []
        __magic_name__ : List[str] = list(lowerCAmelCase__ )

    def __len__( self ) -> int:
        # Dimension of the vector.
        return len(self.__components )

    def __str__( self ) -> str:
        # Render as "(c1,c2,...)".
        return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"

    def __add__( self , lowerCAmelCase__ ) -> Vector:
        # Component-wise addition; sizes must match.
        __magic_name__ : Dict = len(self )
        if size == len(lowerCAmelCase__ ):
            __magic_name__ : str = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
            return Vector(lowerCAmelCase__ )
        else:
            raise Exception("""must have the same size""" )

    def __sub__( self , lowerCAmelCase__ ) -> Vector:
        # Component-wise subtraction; sizes must match.
        __magic_name__ : int = len(self )
        if size == len(lowerCAmelCase__ ):
            __magic_name__ : str = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
            return Vector(lowerCAmelCase__ )
        else: # error case
            raise Exception("""must have the same size""" )

    @overload
    def __mul__( self , lowerCAmelCase__ ) -> Vector:
        ...

    @overload
    def __mul__( self , lowerCAmelCase__ ) -> float:
        ...

    def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
        # Scalar multiply (returns Vector) or dot product (returns float).
        if isinstance(lowerCAmelCase__ , (float, int) ):
            __magic_name__ : Optional[Any] = [c * other for c in self.__components]
            return Vector(lowerCAmelCase__ )
        elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
            __magic_name__ : Optional[Any] = len(self )
            __magic_name__ : List[Any] = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
            return sum(lowerCAmelCase__ )
        else: # error case
            raise Exception("""invalid operand!""" )

    def __magic_name__ ( self ) -> Vector:
        # Return a copy of this vector.
        return Vector(self.__components )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> float:
        # component(i): fetch the i-th component (negative indices allowed).
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        # change_component(pos, value): intent is self.__components[pos] = value.
        assert -len(self.__components ) <= pos < len(self.__components )
        __magic_name__ : Optional[int] = value

    def __magic_name__ ( self ) -> float:
        # Euclidean (L2) norm.
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        __magic_name__ : Dict = [c**2 for c in self.__components]
        return math.sqrt(sum(lowerCAmelCase__ ) )

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
        # Angle between self and other, in radians (or degrees when deg=True).
        __magic_name__ : Optional[Any] = self * other
        __magic_name__ : List[str] = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def UpperCamelCase ( _A ):
    """Return the zero vector of the given dimension.

    Args:
        _A: the dimension (a non-negative int).

    Returns:
        A ``Vector`` whose ``_A`` components are all 0.

    Fixes: the original asserted ``isinstance(_A, _A)`` (a TypeError for int
    input) and referenced an undefined name ``dimension`` when building the
    component list.
    """
    assert isinstance(_A, int )
    return Vector([0] * _A )
def UpperCamelCase ( dimension, pos ):
    """Return the unit basis vector e_pos in `dimension` dimensions.

    Args:
        dimension: length of the vector (int).
        pos: index of the component set to 1 (int).

    Returns:
        A ``Vector`` with a single 1 at index ``pos`` and 0 elsewhere.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and never stored the 1 into the component list — the value
    was bound to a throwaway local instead.
    """
    assert isinstance(dimension, int ) and (isinstance(pos, int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def UpperCamelCase ( scalar, x, y ):
    """Return ``x * scalar + y`` (the BLAS "axpy" operation).

    Args:
        scalar: an int or float multiplier.
        x: the value to scale; anything supporting ``x * scalar``.
        y: the value to add; anything supporting ``+`` with the scaled x.

    Returns:
        ``x * scalar + y``.

    Fixes: the original declared three parameters with one shared name (a
    SyntaxError) and its isinstance checks compared values against themselves.
    Only the scalar is type-checked now; x and y are duck-typed so both the
    Vector class in this module and plain numbers work.
    """
    assert isinstance(scalar, (int, float) )
    return x * scalar + y
def UpperCamelCase ( n, a, b ):
    """Return a vector of length ``n`` with random integer components in [a, b].

    Args:
        n: number of components.
        a: inclusive lower bound for each component.
        b: inclusive upper bound for each component.

    Returns:
        A ``Vector`` of ``n`` random integers.

    Fixes: the original declared three parameters with one shared name (a
    SyntaxError), so seed/bounds/length could not be distinguished.
    """
    random.seed(None )
    ans = [random.randint(a, b ) for _ in range(n )]
    return Vector(ans )
class snake_case__ :
    # NOTE(review): obfuscated Matrix class (row-major, __matrix/__width/__height).
    # Several methods repeat the parameter name `lowerCAmelCase__` (a SyntaxError
    # as written) and bodies reference the pre-obfuscation names (matrix, w, h,
    # ans, x, y, value). Code kept byte-identical; only comments added.

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        # Intent: store rows, width, and height into the private attributes.
        __magic_name__ : Dict = matrix
        __magic_name__ : Tuple = w
        __magic_name__ : Union[str, Any] = h

    def __str__( self ) -> str:
        # Render each row as |a,b,...,c| on its own line.
        __magic_name__ : Dict = """"""
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans

    def __add__( self , lowerCAmelCase__ ) -> Matrix:
        # Element-wise addition; dimensions must match.
        if self.__width == other.width() and self.__height == other.height():
            __magic_name__ : Tuple = []
            for i in range(self.__height ):
                __magic_name__ : Tuple = [
                    self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
                    for j in range(self.__width )
                ]
                matrix.append(lowerCAmelCase__ )
            return Matrix(lowerCAmelCase__ , self.__width , self.__height )
        else:
            raise Exception("""matrix must have the same dimension!""" )

    def __sub__( self , lowerCAmelCase__ ) -> Matrix:
        # Element-wise subtraction; dimensions must match.
        if self.__width == other.width() and self.__height == other.height():
            __magic_name__ : Optional[Any] = []
            for i in range(self.__height ):
                __magic_name__ : int = [
                    self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
                    for j in range(self.__width )
                ]
                matrix.append(lowerCAmelCase__ )
            return Matrix(lowerCAmelCase__ , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )

    @overload
    def __mul__( self , lowerCAmelCase__ ) -> Matrix:
        ...

    @overload
    def __mul__( self , lowerCAmelCase__ ) -> Vector:
        ...

    def __mul__( self , lowerCAmelCase__ ) -> Vector | Matrix:
        # Matrix-vector product (returns Vector) or matrix-scalar product
        # (returns Matrix); anything else falls through to None.
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # matrix-vector
            if len(lowerCAmelCase__ ) == self.__width:
                __magic_name__ : Tuple = zero_vector(self.__height )
                for i in range(self.__height ):
                    __magic_name__ : Optional[int] = [
                        self.__matrix[i][j] * other.component(lowerCAmelCase__ )
                        for j in range(self.__width )
                    ]
                    ans.change_component(lowerCAmelCase__ , sum(lowerCAmelCase__ ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(lowerCAmelCase__ , (int, float) ): # matrix-scalar
            __magic_name__ : Any = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(lowerCAmelCase__ , self.__width , self.__height )
        return None

    def __magic_name__ ( self ) -> int:
        # height(): number of rows.
        return self.__height

    def __magic_name__ ( self ) -> int:
        # width(): number of columns.
        return self.__width

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
        # component(x, y): fetch element at row x, column y.
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""change_component: indices out of bounds""" )

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        # change_component(x, y, value): intent is self.__matrix[x][y] = value.
        if 0 <= x < self.__height and 0 <= y < self.__width:
            __magic_name__ : List[Any] = value
        else:
            raise Exception("""change_component: indices out of bounds""" )

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
        # minor(x, y): determinant of the matrix with row x and column y removed.
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        __magic_name__ : Optional[int] = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(lowerCAmelCase__ ) ):
            __magic_name__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(lowerCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
        # cofactor(x, y): signed minor (-1)^(x+y) * minor(x, y).
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(lowerCAmelCase__ , lowerCAmelCase__ )
        else:
            raise Exception("""Indices out of bounds""" )

    def __magic_name__ ( self ) -> float:
        # determinant(): Laplace expansion along the first row, with 1x1 and
        # 2x2 handled directly.
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if self.__height < 1:
            raise Exception("""Matrix has no element""" )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            __magic_name__ : str = [
                self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase__ ) for y in range(self.__width )
            ]
            return sum(lowerCAmelCase__ )
def UpperCamelCase ( _A ):
    """Return the ``_A`` x ``_A`` zero matrix.

    Args:
        _A: the side length n (int).

    Returns:
        A square ``Matrix`` of zeros.

    Fixes: the original built rows with an undefined name ``n`` and passed the
    int three times to ``Matrix`` instead of passing the row data.
    """
    ans: list[list[float]] = [[0] * _A for _ in range(_A )]
    return Matrix(ans, _A, _A )
def UpperCamelCase ( width, height, a, b ):
    """Return a ``height`` x ``width`` matrix of random integers in [a, b].

    Args:
        width: number of columns.
        height: number of rows.
        a: inclusive lower bound for each entry.
        b: inclusive upper bound for each entry.

    Returns:
        A ``Matrix`` filled with random integers.

    Fixes: the original declared four parameters with one shared name (a
    SyntaxError), so the dimensions and bounds were indistinguishable.
    """
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a, b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix, width, height )
| 342 | 0 |
def _SCREAMING_SNAKE_CASE ( a , b ):
    '''Return the bitwise XOR of two non-negative ints as a "0b..." string.

    Args:
        a: first non-negative integer.
        b: second non-negative integer.

    Returns:
        A binary string prefixed with "0b"; both operands are zero-padded to
        the longer binary length before the per-digit comparison.

    Raises:
        ValueError: if either input is negative.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and the body referenced undefined names.
    '''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:] # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Exercise the module's doctests when run as a script.
    from doctest import testmod

    testmod()
| 204 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants. NOTE(review): the obfuscation reuses the
# name `__magic_name__` for every constant, so each assignment shadows the
# previous one; originally these were distinct names (logger, SPIECE_UNDERLINE,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, sizes map, language codes).
__magic_name__: str = logging.get_logger(__name__)
# SentencePiece word-boundary marker character.
__magic_name__: int = "▁"
__magic_name__: List[str] = {"vocab_file": "sentencepiece.bpe.model"}
# Hub location of the pretrained SentencePiece model.
__magic_name__: List[str] = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}
# Maximum positional-embedding sizes per checkpoint.
__magic_name__: Tuple = {
    "facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
# FAIRSEQ_LANGUAGE_CODES: the NLLB-200 language/script codes appended after the
# SentencePiece vocabulary.
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case__ ( _lowerCAmelCase ):
    # NOTE(review): obfuscated NLLB SentencePiece tokenizer. Most method
    # signatures repeat the parameter name `lowerCAmelCase__` (a SyntaxError as
    # written), and bodies reference the pre-obfuscation names. Code is kept
    # byte-identical; only comments were added.

    lowercase__ : str = VOCAB_FILES_NAMES
    lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : str = ['''input_ids''', '''attention_mask''']
    # Prefix/suffix special-token id lists, rebuilt by set_*_lang_special_tokens.
    lowercase__ : List[int] = []
    lowercase__ : List[int] = []

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
        # Mask token behave like a normal word, i.e. include the space before it
        __magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
        __magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        __magic_name__ : Optional[Any] = legacy_behaviour
        super().__init__(
            bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
        # Load the SentencePiece model from the given vocab file.
        __magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowerCAmelCase__ ) )
        __magic_name__ : List[Any] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        __magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __magic_name__ : List[Any] = 1
        __magic_name__ : Dict = len(self.sp_model )
        # Language codes are appended after the SentencePiece ids.
        __magic_name__ : int = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
        }
        __magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
        __magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        __magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        __magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        __magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
        __magic_name__ : Any = self.lang_code_to_id[self._src_lang]
        __magic_name__ : Optional[int] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self ) -> Any:
        # Pickle support: serialize the SentencePiece model instead of the
        # C++-backed processor object.
        __magic_name__ : List[Any] = self.__dict__.copy()
        __magic_name__ : int = None
        __magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , lowerCAmelCase__ ) -> Any:
        # Rebuild the SentencePiece processor from the serialized proto.
        __magic_name__ : Any = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __magic_name__ : Any = {}
        __magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    @property
    def __magic_name__ ( self ) -> str:
        # vocab_size: SP pieces + language codes + fairseq offset + mask token.
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token

    @property
    def __magic_name__ ( self ) -> str:
        # Current source-language code.
        return self._src_lang

    @src_lang.setter
    def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
        # Changing src_lang also refreshes the prefix/suffix special tokens.
        __magic_name__ : Tuple = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
        # 1 marks special tokens (prefix/suffix), 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
        __magic_name__ : Optional[int] = [1] * len(self.prefix_tokens )
        __magic_name__ : Any = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
        return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        # Wrap token ids with the language-specific prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        # NLLB uses a single all-zero token-type segment.
        __magic_name__ : str = [self.sep_token_id]
        __magic_name__ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
        # Build model inputs for translation, attaching the target-language id.
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        __magic_name__ : Dict = src_lang
        __magic_name__ : List[Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
        __magic_name__ : Optional[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
        __magic_name__ : Tuple = tgt_lang_id
        return inputs

    def __magic_name__ ( self ) -> int:
        # Full token -> id mapping, including added tokens.
        __magic_name__ : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
        # Tokenize text into SentencePiece pieces.
        return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
        # token -> id, routing special/fairseq tokens before SentencePiece.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __magic_name__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
        # id -> token, undoing the fairseq offset for SentencePiece ids.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
        # Join pieces and replace the SentencePiece underline with spaces.
        __magic_name__ : Tuple = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
        return out_string

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
        # Save the SentencePiece model file into the given directory.
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __magic_name__ : List[Any] = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase__ , """wb""" ) as fi:
                __magic_name__ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase__ )
        return (out_vocab_file,)

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
        # Convenience wrapper that records src/tgt languages before batching.
        __magic_name__ : List[str] = src_lang
        __magic_name__ : Dict = tgt_lang
        return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )

    def __magic_name__ ( self ) -> str:
        # Switch special tokens to the source language.
        return self.set_src_lang_special_tokens(self.src_lang )

    def __magic_name__ ( self ) -> List[str]:
        # Switch special tokens to the target language.
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
        # Legacy: [] prefix, [eos, src_lang] suffix; new: [src_lang] prefix, [eos] suffix.
        __magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            __magic_name__ : List[str] = []
            __magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
        else:
            __magic_name__ : str = [self.cur_lang_code]
            __magic_name__ : List[Any] = [self.eos_token_id]

    def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
        # Same scheme as above, keyed by the target language code.
        __magic_name__ : List[str] = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            __magic_name__ : List[str] = []
            __magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
        else:
            __magic_name__ : Optional[int] = [self.cur_lang_code]
            __magic_name__ : Union[str, Any] = [self.eos_token_id]
| 342 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map from I-BERT checkpoint names to their hosted
# config.json files.
A__ : str = logging.get_logger(__name__)
A__ : Optional[int] = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
    """simple docstring"""

    # NOTE(review): obfuscated I-BERT model configuration. The __init__ repeats
    # the parameter name `lowerCamelCase` for every argument (a SyntaxError as
    # written) and the body assigns to a single reused local instead of
    # self.<attr>, reading the pre-obfuscation names (vocab_size, hidden_size,
    # ...). Code kept byte-identical; only comments added.
    lowercase__ = '''ibert'''

    def __init__( self : List[Any], lowerCamelCase : Any=30_522, lowerCamelCase : Union[str, Any]=768, lowerCamelCase : Optional[int]=12, lowerCamelCase : List[str]=12, lowerCamelCase : Optional[Any]=3_072, lowerCamelCase : Optional[Any]="gelu", lowerCamelCase : Tuple=0.1, lowerCamelCase : Tuple=0.1, lowerCamelCase : List[Any]=512, lowerCamelCase : int=2, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Optional[Any]=1E-12, lowerCamelCase : List[str]=1, lowerCamelCase : Dict=0, lowerCamelCase : Any=2, lowerCamelCase : Union[str, Any]="absolute", lowerCamelCase : str=False, lowerCamelCase : Optional[int]="none", **lowerCamelCase : List[Any], ):
        '''simple docstring'''
        super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__ )
        # Intent: persist each hyperparameter on the config instance.
        lowercase__ = vocab_size
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = hidden_act
        lowercase__ = intermediate_size
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = max_position_embeddings
        lowercase__ = type_vocab_size
        lowercase__ = initializer_range
        lowercase__ = layer_norm_eps
        lowercase__ = position_embedding_type
        lowercase__ = quant_mode
        lowercase__ = force_dequant
class _UpperCAmelCase ( _lowerCAmelCase ):
    """ONNX export configuration: declares the model's dynamic input axes.

    NOTE(review): this class shadows the config class of the same name above
    (both were renamed to ``_UpperCAmelCase``); reverting the renames is a
    file-wide fix outside this block.
    """

    @property
    def lowercase__ ( self ):
        """Return the ONNX input spec mapping each input name to its dynamic axes."""
        if self.task == "multiple-choice":
            # Bug fix: the axis mapping was assigned to a throwaway local while
            # `dynamic_axis` is referenced in the return below (NameError);
            # bind the name the return statement actually reads.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 207 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
# Emit progress information while converting checkpoints.
logging.set_verbosity_info()
def UpperCamelCase(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert a TensorFlow MobileBERT checkpoint into a PyTorch checkpoint.

    Fixes: all three parameters were renamed to the same placeholder ``_A``
    (duplicate argument names are a SyntaxError in Python). The distinct
    names were recovered from the ``__main__`` call below and each
    argument's use.

    tf_checkpoint_path: path to the TensorFlow checkpoint.
    mobilebert_config_file: JSON config describing the architecture.
    pytorch_dump_path: where to write the converted ``state_dict``.
    """
    # Build the PyTorch model skeleton from the JSON config.
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to throwaway names while
    # `parser` / `args` were read below (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # Bug fix: call the conversion function by the name it is actually
    # defined under in this file (it was renamed to `UpperCamelCase`).
    UpperCamelCase(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 342 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a__ ( unittest.TestCase ):
    """Holds image-processor settings and computes expected output sizes.

    NOTE(review): this class was mechanically renamed at some point and is
    not runnable as written — the inline notes flag the damage. Code is
    left byte-identical.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict = True , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Union[str, Any] = 3_2 , UpperCAmelCase__ : str = True , UpperCAmelCase__ : Union[str, Any] = 1 / 2_5_5 , UpperCAmelCase__ : Tuple = True , UpperCAmelCase__ : List[Any] = True , UpperCAmelCase__ : Dict = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , UpperCAmelCase__ : Dict = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , UpperCAmelCase__ : Dict = True , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Optional[int]=3_0 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : Optional[int]=3 , ) ->Union[str, Any]:
        # NOTE(review): every parameter is named `UpperCAmelCase__` — duplicate
        # argument names are a SyntaxError in Python — and the body below reads
        # `parent`, `do_resize`, ... which are therefore never bound.
        """simple docstring"""
        SCREAMING_SNAKE_CASE : str = parent
        SCREAMING_SNAKE_CASE : Dict = do_resize
        SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_8_8}
        SCREAMING_SNAKE_CASE : Union[str, Any] = size_divisor
        SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
        SCREAMING_SNAKE_CASE : Dict = rescale_factor
        SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
        SCREAMING_SNAKE_CASE : List[str] = do_center_crop
        SCREAMING_SNAKE_CASE : Tuple = image_mean
        SCREAMING_SNAKE_CASE : Tuple = image_std
        SCREAMING_SNAKE_CASE : Tuple = do_pad
        SCREAMING_SNAKE_CASE : int = batch_size
        SCREAMING_SNAKE_CASE : List[Any] = num_channels
        SCREAMING_SNAKE_CASE : int = min_resolution
        SCREAMING_SNAKE_CASE : str = max_resolution

    def _lowercase ( self : Optional[Any] ) ->str:
        # Return the kwargs dict used to build the image processor under test.
        # NOTE(review): reads self.image_mean etc., which were never stored by
        # the broken __init__ above.
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=False ) ->int:
        # Compute the (height, width) the processor is expected to produce.
        # NOTE(review): duplicate parameter names again (SyntaxError); the body
        # reads `image_inputs`/`batched` plus undefined `lowerCAmelCase__`
        # placeholders, and several tuple unpacks (e.g. `w, h = image.size`)
        # were collapsed to a single throwaway target. Code left unchanged.
        """simple docstring"""
        if not batched:
            SCREAMING_SNAKE_CASE : Dict = self.size["""shortest_edge"""]
            SCREAMING_SNAKE_CASE : List[str] = image_inputs[0]
            if isinstance(lowerCAmelCase__ , Image.Image ):
                SCREAMING_SNAKE_CASE : List[Any] = image.size
            else:
                SCREAMING_SNAKE_CASE : Dict = image.shape[1], image.shape[2]
            SCREAMING_SNAKE_CASE : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
            if h < w:
                SCREAMING_SNAKE_CASE : str = size, scale * w
            else:
                SCREAMING_SNAKE_CASE : Optional[Any] = scale * h, size
            SCREAMING_SNAKE_CASE : Tuple = int((1_3_3_3 / 8_0_0) * size )
            if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
                SCREAMING_SNAKE_CASE : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
                SCREAMING_SNAKE_CASE : Union[str, Any] = newh * scale
                SCREAMING_SNAKE_CASE : Any = neww * scale
            SCREAMING_SNAKE_CASE : str = int(newh + 0.5 ), int(neww + 0.5 )
            SCREAMING_SNAKE_CASE : int = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = []
            for image in image_inputs:
                # NOTE(review): `self.get_expected_values` does not exist under
                # that name here (the method was renamed to `_lowercase`).
                SCREAMING_SNAKE_CASE : int = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            SCREAMING_SNAKE_CASE : Optional[Any] = max(lowerCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
            SCREAMING_SNAKE_CASE : Tuple = max(lowerCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a__ ( _lowerCAmelCase , unittest.TestCase ):
    """Image-processing tests for BridgeTower (PIL / numpy / torch inputs).

    NOTE(review): this class was mechanically renamed and is not runnable as
    written: it shadows the tester class above (also renamed to `a__`),
    references the original `BridgeTowerImageProcessingTester` name, all
    test methods share the name `_lowercase` (only the last definition
    survives), several results of `get_expected_values` lost their tuple
    unpack, and `lowerCAmelCase__` placeholders are undefined. Flagged
    inline; code left byte-identical.
    """

    UpperCAmelCase__ : int =BridgeTowerImageProcessor if is_vision_available() else None

    def _lowercase ( self : Dict ) ->Optional[Any]:
        """simple docstring"""
        # NOTE(review): `BridgeTowerImageProcessingTester` does not exist in
        # this file — the tester class above was renamed to `a__`, and the
        # result is assigned to a dead local instead of an instance attribute.
        SCREAMING_SNAKE_CASE : Any = BridgeTowerImageProcessingTester(self )

    @property
    def _lowercase ( self : Dict ) ->List[Any]:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase ( self : Optional[int] ) ->Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
        # NOTE(review): the `lowerCAmelCase__` arguments below are undefined —
        # they replaced the object under test (`image_processing`).
        self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )

    def _lowercase ( self : Optional[int] ) ->Optional[int]:
        """simple docstring"""
        pass

    def _lowercase ( self : Optional[int] ) ->Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        # NOTE(review): `get_expected_values` returns a pair; the original
        # `expected_height, expected_width = ...` unpack was collapsed to one
        # throwaway target, leaving the names used below unbound.
        SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _lowercase ( self : List[Any] ) ->Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _lowercase ( self : List[Any] ) ->str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
    """Tokenizer test-suite wiring for MGP-STR.

    NOTE(review): this class was mechanically renamed and is not runnable as
    written: the four class attributes below all share the name
    `lowercase__` (only the last assignment survives), and the method
    bodies reference undefined `lowerCAmelCase__` placeholders and locals
    collapsed into throwaway `__magic_name__` targets. Flagged inline;
    code left byte-identical.
    """

    lowercase__ : Optional[Any] = MgpstrTokenizer
    lowercase__ : int = False
    lowercase__ : Any = {}
    lowercase__ : Optional[int] = False

    def __magic_name__ ( self ) -> Optional[Any]:
        # Write a minimal character vocabulary to a temp dir for the tests.
        super().setUp()
        # fmt: off
        __magic_name__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        # NOTE(review): `lowerCAmelCase__` is undefined — it replaced the
        # vocab list built above; likewise the vocab file path is bound to a
        # dead local while `self.vocab_file` is read below.
        __magic_name__ : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        __magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )

    def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
        # Factory for tokenizers backed by the temp vocabulary.
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
        # Provide an (input, expected output) text pair for round-trip tests.
        __magic_name__ : List[str] = """tester"""
        __magic_name__ : int = """tester"""
        return input_text, output_text

    @unittest.skip("""MGP-STR always lower cases letters.""" )
    def __magic_name__ ( self ) -> str:
        pass

    def __magic_name__ ( self ) -> List[str]:
        # NOTE(review): several `lowerCAmelCase__` placeholders below are
        # undefined (they replaced booleans / local results).
        __magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                __magic_name__ : Dict = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                __magic_name__ : List[str] = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(len(lowerCAmelCase__ ) , 1 )
                __magic_name__ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
                self.assertTrue(special_token not in decoded )

    def __magic_name__ ( self ) -> Union[str, Any]:
        __magic_name__ : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                # NOTE(review): both unpack targets share one name, and the
                # names read afterwards (`tokens`, `ids`, `text_a`, ...) are
                # unbound — the original distinct locals were renamed away.
                __magic_name__ ,__magic_name__ : Optional[Any] = self.get_input_output_texts(lowerCAmelCase__ )
                __magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
                __magic_name__ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
                __magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
                __magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
                self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
                __magic_name__ : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
                self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
                self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase__ )

    @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def __magic_name__ ( self ) -> Tuple:
        pass

    @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def __magic_name__ ( self ) -> Optional[Any]:
        pass
| 342 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCamelCase :
    """Model tester for TF DeBERTa-v2: builds configs/inputs and checks shapes.

    NOTE(review): this class was mechanically renamed and is not runnable as
    written: `__init__` and the `create_and_check_*` methods repeat a single
    placeholder parameter name (duplicate argument names are a SyntaxError),
    their bodies read the original parameter names which are therefore
    unbound, all check methods share the name `__a`, and several results are
    bound to the dead local `a` while `result` is read afterwards. Flagged
    inline; code left byte-identical.
    """

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__="None" , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> List[Any]:
        # NOTE(review): duplicate parameter names (SyntaxError); the body reads
        # `parent`, `batch_size`, ... which are never bound.
        a : Union[str, Any] = parent
        a : Dict = batch_size
        a : Any = seq_length
        a : Dict = is_training
        a : str = use_input_mask
        a : Tuple = use_token_type_ids
        a : Union[str, Any] = use_labels
        a : Union[str, Any] = vocab_size
        a : int = hidden_size
        a : int = num_hidden_layers
        a : Union[str, Any] = num_attention_heads
        a : str = intermediate_size
        a : Any = hidden_act
        a : List[Any] = hidden_dropout_prob
        a : Tuple = attention_probs_dropout_prob
        a : int = max_position_embeddings
        a : Dict = type_vocab_size
        a : int = type_sequence_label_size
        a : List[Any] = initializer_range
        a : Optional[int] = num_labels
        a : Union[str, Any] = num_choices
        a : Union[str, Any] = relative_attention
        a : Dict = position_biased_input
        a : Union[str, Any] = pos_att_type
        a : str = scope

    def __a ( self ) -> str:
        # Build a config plus random input tensors for the checks below.
        # NOTE(review): every intermediate is bound to the dead local `a`, so
        # the names in the return tuple are unbound.
        a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a : List[str] = None
        if self.use_input_mask:
            a : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        a : Dict = None
        if self.use_token_type_ids:
            a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a : Any = None
        a : int = None
        a : Dict = None
        if self.use_labels:
            a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        a : Optional[int] = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
        # Shape check for the base model.
        a : int = TFDebertaVaModel(config=lowerCAmelCase__ )
        a : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        a : List[str] = [input_ids, input_mask]
        a : Tuple = model(lowerCAmelCase__ )
        a : List[str] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
        # Shape check for the masked-LM head.
        a : Any = TFDebertaVaForMaskedLM(config=lowerCAmelCase__ )
        a : List[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        a : int = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
        # Shape check for the sequence-classification head.
        a : Dict = self.num_labels
        a : List[Any] = TFDebertaVaForSequenceClassification(config=lowerCAmelCase__ )
        a : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        a : List[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
        # Shape check for the token-classification head.
        a : Dict = self.num_labels
        a : Any = TFDebertaVaForTokenClassification(config=lowerCAmelCase__ )
        a : Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        a : str = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
        # Shape check for the question-answering head.
        a : List[Any] = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase__ )
        a : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        a : Union[str, Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __a ( self ) -> Dict:
        # Common-test entry: split config from the input dict.
        a : List[str] = self.prepare_config_and_inputs()
        # NOTE(review): the parenthesized annotated target below is invalid
        # syntax at worst / a collapsed 7-name tuple unpack at best, and
        # `config_and_inputs` is unbound (the prepared tuple was stored in `a`).
        (
            a
        ) : Optional[int] = config_and_inputs
        a : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Model/pipeline test-case wiring for TF DeBERTa-v2.

    Fixes relative to the previous revision: ``setUp`` stored the tester and
    config-tester in throwaway locals although every test below reads
    ``self.model_tester`` / ``self.config_tester``; the prepared
    config-and-inputs tuples were likewise bound to dead locals; and the
    loaded model in the slow test was never bound before being asserted on.
    """

    # NOTE(review): all four class attributes share the name `lowerCamelCase`
    # (only the last assignment survives); the original attribute names read
    # by the mixins were lost in a rename and cannot be recovered from this
    # file alone, so the assignments are kept as-is.
    lowerCamelCase : int =(
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowerCamelCase : str =(
        {
            '''feature-extraction''': TFDebertaVaModel,
            '''fill-mask''': TFDebertaVaForMaskedLM,
            '''question-answering''': TFDebertaVaForQuestionAnswering,
            '''text-classification''': TFDebertaVaForSequenceClassification,
            '''token-classification''': TFDebertaVaForTokenClassification,
            '''zero-shot''': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase : Optional[Any] =False
    lowerCamelCase : Any =False

    def __a(self) -> Optional[Any]:
        # NOTE(review): `TFDebertaVaModelTester` is the intended tester class;
        # in this file it was renamed to `__UpperCamelCase`, so this reference
        # still needs that rename reverted elsewhere to resolve.
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def __a(self) -> List[str]:
        self.config_tester.run_common_tests()

    def __a(self) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def __a(self) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def __a(self) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def __a(self) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def __a(self) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def __a(self) -> str:
        # Smoke-test loading the published checkpoint.
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    """Integration tests for TF DeBERTa-v2 against known output values.

    Fixes relative to the previous revision: the model, its inputs and its
    output were bound to throwaway locals while ``model`` and ``output``
    were read afterwards (NameError); the distinct locals were restored
    from those later reads.
    """

    @unittest.skip(reason="Model not available yet" )
    def __a(self) -> List[Any]:
        pass

    @slow
    def __a(self) -> List[str]:
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # Reference slice of hidden states for this checkpoint/input.
        expected_slice = tf.constant(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1E-4)
import re
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(_A, _A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 342 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger for the conversion script.
logger = logging.get_logger(__name__)

# Bug fix: these constants were all bound to the same placeholder name, so
# only the last assignment survived while `rename_keys_prefix` (default of
# get_new_dict) and `ACCEPTABLE_CHECKPOINTS` (membership check in the
# converter) were read below. Restore the names the code reads.
# (old prefix, new prefix) pairs applied to every checkpoint key.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
# Checkpoint filenames this converter knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path: str):
    """Load a raw checkpoint state dict from *checkpoint_path* onto the CPU.

    Fixes: the function had been renamed although the converter below calls
    ``load_state_dict``, and the body read ``_A`` while the parameter was
    named ``_lowerCamelCase`` (NameError).
    """
    # map_location="cpu" lets GPU-saved checkpoints load on CPU-only hosts.
    sd = torch.load(checkpoint_path, map_location="""cpu""")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Return an ``OrderedDict`` copy of *d* with keys renamed for VisualBERT.

    Detector weights are dropped; every remaining key has each
    (old, new) prefix pair from *rename_keys_prefix* applied in order.

    Fixes: the function had been renamed although the converter calls
    ``get_new_dict``, the signature repeated one placeholder parameter name
    (a SyntaxError), and several targets were collapsed into dead locals.
    """
    new_d = OrderedDict()
    # NOTE(review): this value was computed into a throwaway local in the
    # previous revision; it looks like a collapsed insertion of a
    # position-ids buffer into `new_d` — confirm against the upstream
    # conversion script before relying on it.
    position_ids = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["""cls.predictions.decoder.bias"""] = new_d["""cls.predictions.bias"""]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to a HF model directory.

    Fixes: the function had been renamed although ``__main__`` calls
    ``convert_visual_bert_checkpoint``, and ``model_type``/``config_params``
    and the loaded state dicts were bound to throwaway locals while being
    read further down (NameError).
    """
    assert (
        checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'

    # Get Config: derive model head and visual-embedding size from the
    # checkpoint filename.
    if "pre" in checkpoint_path:
        model_type = """pretraining"""
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 5_12}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 20_48}
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 20_48}
        elif "nlvr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 10_24}
        else:
            raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 5_12}
            model_type = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 20_48}
            model_type = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 20_48, """num_labels""": 31_29}
            model_type = """vqa"""
        elif "nlvr" in checkpoint_path:
            config_params = {
                """visual_embedding_dim""": 10_24,
                """num_labels""": 2,
            }
            model_type = """nlvr"""

    config = VisualBertConfig(**config_params)

    # Load State Dict and rename its keys for the HF implementation.
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to a throwaway name
    # while `parser` / `args` were read below (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
import doctest
from collections import deque
import numpy as np
class snake_case__ :
    """Circular (cyclic) convolution of two discrete signals.

    Fixes: the example signals were stored into throwaway locals although
    the convolution method reads ``self.first_signal``/``self.second_signal``,
    and several variable references had been replaced by an undefined
    placeholder name; they were restored from the locals defined above each
    use.
    """

    def __init__(self) -> None:
        # Example signals; the shorter one is zero-padded before convolving.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def __magic_name__(self) -> list[float]:
        """Return the circular convolution of the two signals, rounded to 2 dp.

        >>> snake_case__().__magic_name__()
        [10.0, 10.0, 6.0, 14.0]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # Row i of the matrix is the second signal rotated right by i.
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(value, 2) for value in final_signal]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 342 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
# Bug fix: both constants below were bound to one placeholder name, so the
# symbols clobbered the speed of light, and the four symbols collapsed into
# a single target — while the functions below read `c` and `ct, x, y, z`.
c = 2_9_9_7_9_2_4_5_8

# Symbols
ct, x, y, z = symbols("""ct x y z""")
def beta(velocity: float) -> float:
    """Return the velocity as a fraction of the speed of light (v/c).

    Raises ValueError for speeds above c or below 1 m/s.

    Fixes: all four functions in this module shared one name (so only the
    last survived) while the call sites read ``beta``/``gamma``/...; the
    parameter had also been renamed although the body reads ``velocity``.
    """
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c
def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)^2) for the given speed."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x-axis for *velocity*."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity, event=None) -> np.ndarray:
    """Apply a Lorentz boost of *velocity* to a four-vector *event*.

    When *event* is None a symbolic four-vector [ct, x, y, z] is used.

    Fixes: the signature repeated one placeholder parameter name (a
    SyntaxError), and the symbolic default vector was built into a dead
    local while the return statement multiplied by the still-None *event*.
    """
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    # Bug fix: the results below were bound to one placeholder name while
    # `four_vector`, `sub_dict` and `numerical_vector` were read afterwards.
    four_vector = transform(2_9_9_7_9_2_4_5)
    print("""Example of four vector: """)
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 343 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the ConvNext model family: heavy submodules are only
# imported on first attribute access via _LazyModule.
# NOTE(review): every assignment below is bound to the same placeholder name while
# the code still reads `_import_structure` — the conditional entries never reach
# the structure actually handed to _LazyModule; confirm against the original file.
_SCREAMING_SNAKE_CASE = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}

# Vision-only components (feature extractor / image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""]
    _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module
    # below is installed instead.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 343 | 1 |
def lowercase( ) -> int:
    """Project Euler 19: count the Sundays that fell on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Walks the century one week at a time, tracking day/month/year by hand
    (including leap-year February), and counts the weeks that land on day 1.

    Returns:
        The number of first-of-the-month Sundays (171).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # jump one week at a time; only Sundays are visited
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    # The counting function above is bound to `lowercase`; the previous call
    # referenced an undefined name `solution`.
    print(lowercase())
| 343 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Fast tests for the ShapE text-to-3D pipeline built from tiny random components.

    NOTE(review): many assignments below are bound to a shared placeholder name
    while later lines read the original names (e.g. ``tokenizer``, ``model``) —
    the class cannot run as written; compare against the original test file.
    """

    # Pipeline class under test and its prompt-based parameter sets.
    __lowerCAmelCase = ShapEPipeline
    __lowerCAmelCase = ["""prompt"""]
    __lowerCAmelCase = ["""prompt"""]
    __lowerCAmelCase = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    __lowerCAmelCase = False

    @property
    def lowerCamelCase_ ( self : List[Any] ):
        """Hidden size of the tiny text embedder."""
        return 32

    @property
    def lowerCamelCase_ ( self : List[str] ):
        """Dimensionality of the prior's time input."""
        return 32

    @property
    def lowerCamelCase_ ( self : str ):
        """Dimensionality of the time embedding (4x the time input dim)."""
        return self.time_input_dim * 4

    @property
    def lowerCamelCase_ ( self : str ):
        """Hidden size of the tiny renderer."""
        return 8

    @property
    def lowerCamelCase_ ( self : int ):
        """Tiny random CLIP tokenizer used by the dummy pipeline."""
        UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def lowerCamelCase_ ( self : Dict ):
        """Deterministically seeded tiny CLIP text encoder."""
        torch.manual_seed(0 )
        UpperCamelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(lowerCamelCase_ )

    @property
    def lowerCamelCase_ ( self : List[str] ):
        """Deterministically seeded tiny PriorTransformer."""
        torch.manual_seed(0 )
        UpperCamelCase = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
        return model

    @property
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Deterministically seeded tiny ShapERenderer."""
        torch.manual_seed(0 )
        UpperCamelCase = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
        return model

    def lowerCamelCase_ ( self : Dict ):
        """Assemble the full set of dummy pipeline components."""
        UpperCamelCase = self.dummy_prior
        UpperCamelCase = self.dummy_text_encoder
        UpperCamelCase = self.dummy_tokenizer
        UpperCamelCase = self.dummy_renderer
        UpperCamelCase = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
        UpperCamelCase = {
            """prior""": prior,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
        """Build deterministic call kwargs for the pipeline on the given device/seed."""
        if str(lowerCamelCase_ ).startswith("""mps""" ):
            # MPS does not support device-bound generators.
            UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
        else:
            UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        UpperCamelCase = {
            """prompt""": """horse""",
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs

    def lowerCamelCase_ ( self : int ):
        """Smoke-test a single CPU inference and compare an output slice."""
        UpperCamelCase = """cpu"""
        UpperCamelCase = self.get_dummy_components()
        UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
        UpperCamelCase = output.images[0]
        UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        UpperCamelCase = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCamelCase_ ( self : Tuple ):
        """Outputs must be consistent across batch sizes."""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Batched inference must match single inference (relaxed on CPU)."""
        UpperCamelCase = torch_device == """cpu"""
        UpperCamelCase = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )

    def lowerCamelCase_ ( self : Dict ):
        """num_images_per_prompt must multiply the output batch dimension."""
        UpperCamelCase = self.get_dummy_components()
        UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        UpperCamelCase = 1
        UpperCamelCase = 2
        UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
        for key in inputs.keys():
            if key in self.batch_params:
                UpperCamelCase = batch_size * [inputs[key]]
        UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow GPU integration test for the full openai/shap-e checkpoint."""

    def lowerCamelCase_ ( self : Tuple ):
        """Free GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self : Any ):
        """Generate a shark and compare against a stored reference rendering."""
        UpperCamelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_np_out.npy""" )
        UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        UpperCamelCase = pipe(
            """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of canonical pretrained-config URLs.
# NOTE(review): both bindings share one placeholder name, so the logger is
# immediately overwritten by the archive map — confirm the intended names.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Configuration class for GPT-NeoX models.

    NOTE(review): the ``__init__`` body assigns to one shared placeholder name
    while reading the original keyword names (``vocab_size`` etc.), which are
    not bound to those names here — compare against the original config class.
    """

    # Model-type identifier used by the auto classes.
    __lowerCAmelCase = """gpt_neox"""

    def __init__( self : Tuple , lowerCamelCase_ : Tuple=5_0432 , lowerCamelCase_ : Optional[Any]=6144 , lowerCamelCase_ : Any=44 , lowerCamelCase_ : List[str]=64 , lowerCamelCase_ : int=2_4576 , lowerCamelCase_ : Dict="gelu" , lowerCamelCase_ : Any=0.2_5 , lowerCamelCase_ : List[Any]=1_0000 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[Any]=2048 , lowerCamelCase_ : List[str]=0.0_2 , lowerCamelCase_ : Optional[Any]=1E-5 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=0 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : int , ):
        """Store the model hyper-parameters and validate derived constraints."""
        super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
        UpperCamelCase = vocab_size
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = rotary_pct
        UpperCamelCase = rotary_emb_base
        UpperCamelCase = attention_dropout
        UpperCamelCase = hidden_dropout
        UpperCamelCase = classifier_dropout
        UpperCamelCase = initializer_range
        UpperCamelCase = layer_norm_eps
        UpperCamelCase = use_cache
        UpperCamelCase = tie_word_embeddings
        UpperCamelCase = use_parallel_residual
        UpperCamelCase = rope_scaling
        self._rope_scaling_validation()

        # Attention heads must evenly divide the hidden size.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )

    def lowerCamelCase_ ( self : Any ):
        """Validate the optional ``rope_scaling`` dict ({type, factor})."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , lowerCamelCase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f"""got {self.rope_scaling}""" )
        UpperCamelCase = self.rope_scaling.get("""type""" , lowerCamelCase_ )
        UpperCamelCase = self.rope_scaling.get("""factor""" , lowerCamelCase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 343 | from __future__ import annotations
def lowercase(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the adjacent sorted runs ``input_list[low:mid]`` and
    ``input_list[mid:high + 1]`` in place and return the list.

    NOTE: the original definition gave all four parameters the same name
    (a SyntaxError) and never wrote the merged run back into the list.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    # Repeatedly take the smaller head element; `<=` keeps the sort stable.
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Whichever side is non-empty is already sorted and larger than `result`.
    input_list[low : high + 1] = result + left + right
    return input_list
def lowercase( UpperCamelCase_ ) -> list:
    """Sort an iterable with a bottom-up (iterative) merge sort.

    Inputs of length <= 1 are returned unchanged; otherwise a new sorted list
    is returned. The original body called an undefined name ``merge`` and had
    its locals collapsed; a private helper restores the intended algorithm.
    """

    def _merge(items: list, low: int, mid: int, high: int) -> list:
        # Merge the sorted runs items[low:mid] and items[mid:high+1] in place.
        merged = []
        left, right = items[low:mid], items[mid : high + 1]
        while left and right:
            merged.append((left if left[0] <= right[0] else right).pop(0))
        items[low : high + 1] = merged + left + right
        return items

    if len(UpperCamelCase_) <= 1:
        return UpperCamelCase_
    items = list(UpperCamelCase_)
    # Width of the runs being merged; doubles every pass.
    p = 2
    while p <= len(items):
        # Merge each pair of adjacent width-p/2 runs.
        for i in range(0, len(items), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            items = _merge(items, low, mid, high)
        # Final merge of the last two (possibly unequal) parts.
        if p * 2 >= len(items):
            items = _merge(items, 0, i, len(items) - 1)
            break
        p *= 2
    return items
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    # The sorting function above is bound to `lowercase`; the previous call
    # referenced the undefined name `iter_merge_sort`.
    print(lowercase(unsorted))
| 343 | 1 |
# Digit-position levels, powers-of-ten table and the jump memo used by the
# digit-sum sequence solver below.
# NOTE(review): all three are bound to one placeholder name, yet the second line
# already reads `ks` and later code reads `ks`/`base`/`memo` — these names are
# unbound as written; confirm against the original source.
_SCREAMING_SNAKE_CASE = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE = {}
# Advance the sequence by the largest cached "jump" available, recursing to
# smaller digit positions (or to sequential computation) when no jump fits,
# and cache the newly discovered jump.
# NOTE(review): the four parameters share one name (a SyntaxError as written)
# and the body reads many names (a_i, k, i, n, memo, base, ks, sub_memo, jumps,
# diff, dn, c, add, next_term, compute, ...) that are never bound here — this
# function needs reconstruction against the original source before it can run.
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
    """Jump ahead in the digit-sum sequence using memoized (diff, dn, k) jumps."""
    # ds_b: digit sum of the high part b; c: numeric value of the low part.
    UpperCamelCase = sum(a_i[j] for j in range(UpperCamelCase_ , len(UpperCamelCase_ ) ) )
    UpperCamelCase = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase_ ) , UpperCamelCase_ ) ) )
    UpperCamelCase , UpperCamelCase = 0, 0
    UpperCamelCase = n - i
    UpperCamelCase = memo.get(UpperCamelCase_ )
    if sub_memo is not None:
        UpperCamelCase = sub_memo.get(UpperCamelCase_ )
        if jumps is not None and len(UpperCamelCase_ ) > 0:
            # find and make the largest jump without going over
            UpperCamelCase = -1
            for _k in range(len(UpperCamelCase_ ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    UpperCamelCase = _k
                    break
            if max_jump >= 0:
                UpperCamelCase , UpperCamelCase , UpperCamelCase = jumps[max_jump]
                # since the difference between jumps is cached, add c
                UpperCamelCase = diff + c
                # Write the low digits of new_c back, then carry the remainder.
                for j in range(min(UpperCamelCase_ , len(UpperCamelCase_ ) ) ):
                    UpperCamelCase , UpperCamelCase = divmod(UpperCamelCase_ , 10 )
                if new_c > 0:
                    add(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        else:
            UpperCamelCase = []
    else:
        UpperCamelCase = {c: []}
        UpperCamelCase = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            UpperCamelCase , UpperCamelCase = next_term(UpperCamelCase_ , k - 1 , i + dn , UpperCamelCase_ )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        UpperCamelCase , UpperCamelCase = compute(UpperCamelCase_ , UpperCamelCase_ , i + dn , UpperCamelCase_ )
        diff += _diff
        dn += terms_jumped
    UpperCamelCase = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    UpperCamelCase = 0
    while j < len(UpperCamelCase_ ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(UpperCamelCase_ , (diff, dn, k) )
    return (diff, dn)
# Sequentially advance the sequence digit array until a carry escapes past
# position k (or n terms have been produced).
# NOTE(review): the four parameters share one name (a SyntaxError as written)
# and the body reads a_i, k, i, n, ds_b, ds_c, diff, addend, add, start_i — all
# unbound here; reconstruct against the original source before running.
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
    """Compute sequential sequence terms; return (total diff, terms produced)."""
    if i >= n:
        return 0, i
    if k > len(UpperCamelCase_ ):
        a_i.extend([0 for _ in range(k - len(UpperCamelCase_ ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    UpperCamelCase = i
    UpperCamelCase , UpperCamelCase , UpperCamelCase = 0, 0, 0
    for j in range(len(UpperCamelCase_ ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        # Each new term adds the current digit sum to the running value.
        UpperCamelCase = ds_c + ds_b
        diff += addend
        UpperCamelCase = 0
        for j in range(UpperCamelCase_ ):
            UpperCamelCase = a_i[j] + addend
            UpperCamelCase , UpperCamelCase = divmod(UpperCamelCase_ , 10 )
            ds_c += a_i[j]
        # A surviving carry would change the high part b — stop and report it.
        if addend > 0:
            break
    if addend > 0:
        add(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    return diff, i - start_i
def lowercase(digits: list, k: int, addend: int) -> None:
    """Add ``addend`` into the little-endian digit list ``digits`` in place,
    starting at index ``k``; carries propagate upward and new high-order
    digits are appended when needed.

    NOTE: the original definition gave all three parameters one name (a
    SyntaxError) and its locals were collapsed; this restores a correct
    in-place digit addition with the same calling convention.
    """
    for j in range(k, len(digits)):
        if addend == 0:
            return
        # Fold the current digit into the running addend, keep the low digit
        # here and carry the rest to the next position.
        addend += digits[j]
        addend, digits[j] = divmod(addend, 10)
    # Whatever is left extends the number with new high-order digits.
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
# Drive the jump machinery until the n-th term's digit array is known, then
# fold the little-endian digits back into an integer.
# NOTE(review): the body reads `digits`, `dn`, `next_term` and `a_n`, none of
# which are bound under those names here — reconstruct against the original
# source before running.
def lowercase( UpperCamelCase_ = 10**15 ) -> int:
    """Return the n-th term of the digit-sum sequence (default n = 10**15)."""
    UpperCamelCase = [1]
    UpperCamelCase = 1
    UpperCamelCase = 0
    while True:
        UpperCamelCase , UpperCamelCase = next_term(UpperCamelCase_ , 20 , i + dn , UpperCamelCase_ )
        dn += terms_jumped
        if dn == n - i:
            break
    # Convert the little-endian digit array into the final integer.
    UpperCamelCase = 0
    for j in range(len(UpperCamelCase_ ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name above (the
    # function was bound to `lowercase`) — this call raises NameError as written.
    print(F'''{solution() = }''')
| 343 | import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE_ :
    """Helper that builds tiny Bit configs/inputs and runs shape checks.

    NOTE(review): the constructor assigns to one shared placeholder name while
    reading the original keyword names (``parent``, ``batch_size``, ...), which
    are unbound here — compare against the original BitModelTester.
    """

    def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ):
        """Record the tiny-model hyper-parameters used by every check below."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = num_channels
        UpperCamelCase = embeddings_size
        UpperCamelCase = hidden_sizes
        UpperCamelCase = depths
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_act
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        UpperCamelCase = len(lowerCamelCase_ )
        UpperCamelCase = out_features
        UpperCamelCase = out_indices
        UpperCamelCase = num_groups

    def lowerCamelCase_ ( self : Any ):
        """Create a random pixel tensor, optional labels and a config."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Optional[int] ):
        """Build a BitConfig from the stored hyper-parameters."""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ):
        """Forward the base model and check the last-hidden-state shape."""
        UpperCamelCase = BitModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
        """Forward the classification head and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = BitForImageClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ):
        """Check backbone feature maps/channels with and without out_features."""
        UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        UpperCamelCase = None
        UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def lowerCamelCase_ ( self : List[str] ):
        """Return (config, inputs_dict) for the common model tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common model/pipeline tests for the Bit family (model, classifier, backbone)."""

    # Model classes and pipeline mapping exercised by the mixins.
    __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    __lowerCAmelCase = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Set up the model tester and a config tester (Bit has no text modality)."""
        UpperCamelCase = BitModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Run the standard config serialization/round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """No extra common properties to check for Bit."""
        return

    @unittest.skip(reason="""Bit does not output attentions""" )
    def lowerCamelCase_ ( self : int ):
        """Skipped: Bit is a convnet and produces no attention maps."""
        pass

    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Skipped: Bit consumes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Skipped: Bit has no token embedding tables."""
        pass

    def lowerCamelCase_ ( self : Tuple ):
        """The forward signature must start with `pixel_values`."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Shape-check the base model forward pass."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] ):
        """Shape-check the backbone forward pass."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Norm layers must initialize with weight 1 and bias 0."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(config=lowerCamelCase_ )
            for name, module in model.named_modules():
                if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def lowerCamelCase_ ( self : int ):
        """Hidden states must have one entry per stage with the expected spatial size."""

        def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ):
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCamelCase = layer_type
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Skipped: chunked feed-forward does not apply to convnets."""
        pass

    def lowerCamelCase_ ( self : str ):
        """Shape-check the image-classification head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : int ):
        """Pretrained checkpoints must load successfully."""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> "Image.Image":
    """Load the COCO cats-on-couch fixture image used by the integration tests.

    NOTE: the original body assigned the opened image to a throwaway name and
    then returned the undefined name `image` (NameError); the annotation also
    referenced the unimported `Any`, so it is now a lazy string.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow end-to-end classification test against a pretrained Bit checkpoint."""

    @cached_property
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Image processor for the first archived checkpoint (None without vision)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Classify the fixture image and compare logits to stored reference values."""
        UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(**lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Backbone-specific common tests for BitBackbone."""

    # Backbone class under test, its config class, and the timm-equivalence flag.
    __lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
    __lowerCAmelCase = BitConfig
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Reuse the shared BitModelTester for backbone checks."""
        UpperCamelCase = BitModelTester(self )
| 343 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger, tokenizer vocabulary file names, canonical download URLs and
# the maximum positional-embedding sizes for the Blenderbot tokenizer.
# NOTE(review): all four bindings share one placeholder name, so each overwrites
# the previous; the tokenizer class below reads the original constant names.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

_SCREAMING_SNAKE_CASE = {
    """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
    """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
    """tokenizer_config_file""": {
        """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
    },
}

_SCREAMING_SNAKE_CASE = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase( ) -> dict:
    """Return the GPT-2 byte -> unicode-character map used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are shifted to
    code points >= 256 so every byte has a visible, reversible character.
    Cached because the 256-entry table never changes.

    NOTE: the original body read the unbound names ``bs``/``cs``/``n`` and an
    unimported ``Union`` annotation; this restores the standard implementation.
    """
    # Bytes that already have a printable, non-space representation.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift unprintable bytes past the byte range
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def lowercase( UpperCamelCase_ ) -> set:
    """Return the set of adjacent symbol pairs in a word.

    Args:
        UpperCamelCase_: a non-empty sequence of symbols (string or tuple of
            variable-length strings, as used by BPE).

    Returns:
        A set of (previous_symbol, symbol) tuples.

    NOTE: the original body read the unbound names ``word``/``pairs``/
    ``prev_char`` and declared a ``-> str`` return despite returning a set.
    """
    pairs = set()
    prev_char = UpperCamelCase_[0]
    for char in UpperCamelCase_[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE_(PreTrainedTokenizer):
    """Blenderbot tokenizer, based on byte-level Byte-Pair-Encoding (GPT-2 style).

    Treats spaces as parts of tokens, so a word is encoded differently at the
    start of a sentence (no leading space) than elsewhere.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the JSON vocab and BPE merges and set up byte-level encoding tables."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token->id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE merges to a single pre-tokenized token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token string to its vocabulary id (falls back to unk)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Undo the byte-level encoding and join tokens into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions and 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Blenderbot does not use token type ids, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word tokenizes like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Blenderbot sequences only append EOS; a second sequence is ignored."""
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into input ids, truncating from the left if too long."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs/inputs and checks TF model outputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ResNetConfig from the tester's hyperparameters."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head's logits shape."""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model tests for TFResNet (config, forward signature, hidden states, heads)."""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # ResNet has no attention, no embeddings to resize/prune/mask, and no ONNX test here.
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # ResNet's config has no text-modality properties to check.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """End-to-end check of a pretrained TFResNet classifier against known logits."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """Launches accelerate's bundled multi-GPU scripts via torchrun and checks they succeed."""

    def setUp(self):
        # Resolve the test scripts shipped inside accelerate.test_utils/scripts.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-runs this very file under torchrun; the __main__ block below does the checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_pad_across_processes: each rank builds a tensor
    # whose first dimension depends on its rank, then checks pad_across_processes.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    for old, new in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "final_layer_norm.weight"),
        ("norm2.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append((f"transformer.encoder.layers.{i}.{old}", f"encoder.layers.{i}.{new}"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    for old, new in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("cross_attn.out_proj.weight", "encoder_attn.out_proj.weight"),
        ("cross_attn.out_proj.bias", "encoder_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "encoder_attn_layer_norm.weight"),
        ("norm2.bias", "encoder_attn_layer_norm.bias"),
        ("norm3.weight", "final_layer_norm.weight"),
        ("norm3.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append((f"transformer.decoder.layers.{i}.{old}", f"decoder.layers.{i}.{new}"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    # (names are identical on both sides; ca_qpos_proj only exists for layer 0 and is
    # handled in the static list below)
    conditional_projections = (
        "sa_qcontent_proj",
        "sa_kcontent_proj",
        "sa_qpos_proj",
        "sa_kpos_proj",
        "sa_v_proj",
        "ca_qcontent_proj",
        "ca_kcontent_proj",
        "ca_kpos_proj",
        "ca_v_proj",
        "ca_qpos_sine_proj",
    )
    for proj in conditional_projections:
        rename_keys.append((f"transformer.decoder.layers.{i}.{proj}.weight", f"decoder.layers.{i}.{proj}.weight"))
    for proj in conditional_projections:
        rename_keys.append((f"transformer.decoder.layers.{i}.{proj}.bias", f"decoder.layers.{i}.{proj}.bias"))

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the value stored under key *old* to key *new*, in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys moved under the conv encoder prefix.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; all other keys are kept unchanged,
    preserving the original insertion order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in_proj weight/bias into q/k/v projections, in place.

    The original checkpoint stores query/key/value as one (768, 256) matrix and one
    768-dim bias per encoder self-attention layer; HF's model expects three separate
    256-row projections.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download the standard COCO cats validation image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
    """Convert a ``DeppMeng/ConditionalDETR`` torch-hub checkpoint to HF format.

    Intended parameters: (model_name, pytorch_dump_folder_path). Builds the
    config, renames checkpoint keys, splits fused q/k/v projections, verifies
    the converted model against the original outputs, pushes to the hub and
    saves model + image processor locally.

    NOTE(review): mechanical renaming damage — both parameters share one name
    (a SyntaxError) and the locals referenced below (``model_name``,
    ``is_panoptic``, ``state_dict``, ``model`` …) are never bound; the dict
    writes were also collapsed into a throwaway local. Compare with the
    upstream conversion script before executing.
    """
    UpperCamelCase = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCamelCase = """resnet101"""
    if "dc5" in model_name:
        UpperCamelCase = True
    UpperCamelCase = """panoptic""" in model_name
    if is_panoptic:
        # panoptic head predicts over 250 classes
        UpperCamelCase = 250
    else:
        # plain detection uses the 91 COCO classes; fetch the id->label map
        UpperCamelCase = 91
        UpperCamelCase = """huggingface/label-files"""
        UpperCamelCase = """coco-detection-id2label.json"""
        UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
        UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
        UpperCamelCase = idalabel
        UpperCamelCase = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection"""
    UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ )
    # prepare image
    UpperCamelCase = prepare_img()
    UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" )
    UpperCamelCase = encoding["""pixel_values"""]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval()
    UpperCamelCase = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            UpperCamelCase = """conditional_detr.""" + src
        rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    UpperCamelCase = rename_backbone_keys(UpperCamelCase_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""conditional_detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                # segmentation-head weights keep their names unchanged
                continue
            else:
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
    # finally, create HuggingFace model and load state dict
    UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ )
    model.load_state_dict(UpperCamelCase_ )
    model.eval()
    model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    UpperCamelCase = conditional_detr(UpperCamelCase_ )
    UpperCamelCase = model(UpperCamelCase_ )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
    model.save_pretrained(UpperCamelCase_ )
    image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    # CLI entry point: convert a torch-hub Conditional DETR checkpoint into the
    # Hugging Face format and save it to the given folder.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""conditional_detr_resnet50""",
        type=str,
        help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    # NOTE(review): renaming damage — the parser is bound to
    # ``_SCREAMING_SNAKE_CASE`` above but used as ``parser``/``args`` below, and
    # ``convert_conditional_detr_checkpoint`` is not defined under that name in
    # this file.
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase( UpperCamelCase_ ) -> Optional[int]:
    """Load a metaseq/fairseq OPT checkpoint and normalise it for HF ``OPTModel``.

    Steps:
      1. load the raw state dict (unwrapping the ``"model"`` sub-dict if present),
      2. drop weights the HF model does not use,
      3. rename projection / final layer-norm keys to the HF names,
      4. split every fused ``qkv_proj`` tensor into separate q/k/v projections.

    Args:
        UpperCamelCase_: path to the fairseq checkpoint (usually ``.../model.pt``).

    Returns:
        The converted state dict (mapping of str -> tensor).
    """
    checkpoint_path = UpperCamelCase_
    sd = torch.load(checkpoint_path , map_location="""cpu""" )
    if "model" in sd.keys():
        # unwrap in place instead of re-reading the (potentially huge) file a
        # second time, as the obfuscated original did
        sd = sd["""model"""]
    # pop unnecessary weights
    keys_to_delete = [
        """decoder.version""",
        """decoder.output_projection.weight""",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        """decoder.project_in_dim.weight""": """decoder.project_in.weight""",
        """decoder.project_out_dim.weight""": """decoder.project_out.weight""",
        """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
        """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    # iterate over a snapshot of the keys since we mutate the dict in the loop
    for key in list(sd.keys() ):
        if ".qkv_proj." in key:
            value = sd[key]
            q_name = key.replace(""".qkv_proj.""" , """.q_proj.""" )
            k_name = key.replace(""".qkv_proj.""" , """.k_proj.""" )
            v_name = key.replace(""".qkv_proj.""" , """.v_proj.""" )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight in K,V,Q
            # order despite the "qkv" naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ) -> Tuple:
    """Convert a fairseq/metaseq OPT checkpoint into a saved HF ``OPTModel``.

    Intended parameters: (checkpoint_path, pytorch_dump_folder_path, config=None);
    the optional ``config`` is a path/name passed to ``OPTConfig.from_pretrained``.

    NOTE(review): mechanical renaming damage — all three parameters share one
    name (a SyntaxError; the caller passes ``config=`` by keyword), and
    ``config`` / ``model`` / ``load_checkpoint`` are unbound or undefined under
    these names in this file. Restore from the upstream script before running.
    """
    UpperCamelCase = load_checkpoint(UpperCamelCase_ )
    if config is not None:
        UpperCamelCase = OPTConfig.from_pretrained(UpperCamelCase_ )
    else:
        UpperCamelCase = OPTConfig()
    # half precision to keep the dump small, eval mode since this is inference-only
    UpperCamelCase = OPTModel(UpperCamelCase_ ).half().eval()
    model.load_state_dict(UpperCamelCase_ )
    # Check results
    Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
    model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    # CLI entry point: convert a metaseq OPT checkpoint into the HF format.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--fairseq_path""",
        type=str,
        help=(
            """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
            """ https://huggingface.co/models?other=opt_metasq"""
        ),
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    # NOTE(review): renaming damage — the parser is bound to
    # ``_SCREAMING_SNAKE_CASE`` but used as ``parser``/``args`` below, and
    # ``convert_opt_checkpoint`` is not defined under that name in this file.
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 343 | from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE_ :
    """Builds a tiny ``ViTMAEConfig`` plus random inputs and runs the shape
    checks shared by the TF ViT-MAE tests below.

    NOTE(review): mechanical renaming damage — every local/attribute was
    collapsed to ``UpperCamelCase`` and every ``__init__`` parameter to
    ``lowerCamelCase_`` (a duplicate-argument SyntaxError); later reads such as
    ``self.batch_size`` rely on attribute names that are never actually set
    here. The intended parameter names are recorded in the comment below.
    """

    # intended parameters: (parent, batch_size=13, image_size=30, patch_size=2,
    # num_channels=3, is_training=True, use_labels=True, hidden_size=32,
    # num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
    # hidden_act="gelu", hidden_dropout_prob=0.1,
    # attention_probs_dropout_prob=0.1, type_sequence_label_size=10,
    # initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None)
    def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ):
        """Store the hyper-parameters and derive the masked sequence length."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = patch_size
        UpperCamelCase = num_channels
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = mask_ratio
        UpperCamelCase = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        UpperCamelCase = (image_size // patch_size) ** 2
        UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def lowerCamelCase_ ( self : List[Any] ):
        """Create a config plus random pixel values (and labels if enabled)."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels
    def lowerCamelCase_ ( self : List[Any] ):
        """Build the small ViTMAEConfig used throughout these tests."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
        """Check TFViTMAEModel produces the expected last_hidden_state shape."""
        UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
        """Check TFViTMAEForPreTraining logits shape, incl. greyscale input."""
        UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ )
        # expected sequence length = num_patches
        UpperCamelCase = (self.image_size // self.patch_size) ** 2
        UpperCamelCase = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        UpperCamelCase = 1
        UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ )
        UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ )
        UpperCamelCase = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def lowerCamelCase_ ( self : Dict ):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        UpperCamelCase = self.prepare_config_and_inputs()
        ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common-mixin test suite for the TF ViT-MAE models.

    Several inherited tests are overridden because ViT-MAE draws a random mask
    in every forward pass: the noise is generated once per test and passed
    explicitly so outputs are comparable across calls.

    NOTE(review): mechanical renaming damage — every local was collapsed to
    ``UpperCamelCase`` (so multi-assignments clobber each other) and the class
    attributes below all share the name ``__lowerCAmelCase``. The intended
    attributes are ``all_model_classes``, ``pipeline_model_mapping``,
    ``test_pruning``, ``test_onnx``, ``test_resize_embeddings`` and
    ``test_head_masking``.
    """

    __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    def lowerCamelCase_ ( self : List[Any] ):
        """Set up the shared model tester and config tester."""
        UpperCamelCase = TFViTMAEModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Run the common ViTMAEConfig sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def lowerCamelCase_ ( self : str ):
        """Skipped: the model is vision-only and has no inputs_embeds."""
        pass
    def lowerCamelCase_ ( self : Tuple ):
        """Input embeddings must be a keras layer; output embeddings may be None."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """The call signature's first argument must be ``pixel_values``."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
    def lowerCamelCase_ ( self : str ):
        """Shape check for the base model."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : int ):
        """Shape check for the pre-training head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """Positional and keyword invocation must give identical outputs (same noise)."""
        # make the mask reproducible
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
            UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
            UpperCamelCase = outputs_dict[0].numpy()
            UpperCamelCase = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
    def lowerCamelCase_ ( self : List[Any] ):
        """Numpy-array inputs must behave the same as tensor inputs."""
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ):
            # convert every tensor entry of the inputs dict to a numpy array
            UpperCamelCase = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(lowerCamelCase_ ):
                    UpperCamelCase = v.numpy()
                else:
                    UpperCamelCase = np.array(lowerCamelCase_ )
            return inputs_np_dict
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ )
            UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
            UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
            self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
        """Override: feed the same fixed noise to both PT and TF models."""
        np.random.seed(2 )
        UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        UpperCamelCase = tf.constant(lowerCamelCase_ )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        UpperCamelCase = tf_noise
        super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """Keras-serializable main layers must round-trip through model.save()."""
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        # collect every `...MainLayer` class that is marked keras-serializable
        UpperCamelCase = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(lowerCamelCase_ )
            if module_member_name.endswith("""MainLayer""" )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
            for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),)
            if isinstance(lowerCamelCase_ , lowerCamelCase_ )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ )
        }
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ )
        inputs_dict.update({"""noise""": noise} )
        for main_layer_class in tf_main_layer_classes:
            UpperCamelCase = main_layer_class(lowerCamelCase_ )
            UpperCamelCase = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) )
            UpperCamelCase = model(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" )
                model.save(lowerCamelCase_ )
                UpperCamelCase = tf.keras.models.load_model(
                    lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(lowerCamelCase_ , tf.keras.Model )
                UpperCamelCase = model(lowerCamelCase_ )
                self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
    @slow
    def lowerCamelCase_ ( self : Dict ):
        """save_pretrained / from_pretrained must preserve outputs (fixed noise)."""
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
            if model_class.__name__ == "TFViTMAEModel":
                UpperCamelCase = outputs.last_hidden_state.numpy()
                UpperCamelCase = 0
            else:
                UpperCamelCase = outputs.logits.numpy()
                UpperCamelCase = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
                UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ )
                UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
                if model_class.__name__ == "TFViTMAEModel":
                    UpperCamelCase = after_outputs["""last_hidden_state"""].numpy()
                    UpperCamelCase = 0
                else:
                    UpperCamelCase = after_outputs["""logits"""].numpy()
                    UpperCamelCase = 0
                UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(lowerCamelCase_ , 1E-5 )
    def lowerCamelCase_ ( self : List[str] ):
        """get_config must be JSON-serializable and rebuild an equivalent model."""
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
            UpperCamelCase = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(lowerCamelCase_ )
            UpperCamelCase = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            UpperCamelCase = model_class.from_config(model.config )
            UpperCamelCase = new_model(lowerCamelCase_ )  # Build model
            new_model.set_weights(model.get_weights() )
            UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ )
            self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self : int ):
        """Skipped: outputs are not deterministic without a fixed noise tensor."""
        pass
    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Skipped: outputs are not deterministic without a fixed noise tensor."""
        pass
    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Smoke test: loading a pretrained checkpoint returns a model."""
        UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> int:
    """Load the local COCO cats fixture image used by the slow integration test.

    Returns:
        The opened PIL image (the ``-> int`` annotation is inherited noise from
        the original file and kept only for interface stability).
    """
    # fix: the obfuscated original bound the image to a throwaway local and
    # returned an undefined name
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: run the pretrained facebook/vit-mae-base model on
    a fixture image with fixed noise and compare logits to recorded values."""

    @cached_property
    def lowerCamelCase_ ( self : Dict ):
        """Image processor for the pretrained checkpoint (None without vision deps)."""
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self : List[str] ):
        """Forward pass on the cats image must reproduce the recorded logits."""
        # make the random mask reproducible across runs
        np.random.seed(2 )
        UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        UpperCamelCase = ViTMAEConfig()
        UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(1, num_patches) )
        # forward pass
        UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 343 | 1 |
def lowercase( UpperCamelCase_ ) -> list[list[float]]:
    """Transpose rows of raw scores into per-attribute columns.

    Args:
        UpperCamelCase_: source data as rows, one list of values per candidate.

    Returns:
        One list per attribute (column), every value coerced to ``float``.
    """
    # fix: the obfuscated original referenced the undefined names
    # ``source_data``/``data_lists`` and enumerated the wrong sequence
    source_data = UpperCamelCase_
    data_lists: list[list[float]] = []
    for row in source_data:
        for i, el in enumerate(row ):
            # grow the column list lazily the first time column i is seen
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def lowercase( data_lists , weights ) -> list[list[float]]:
    """Min-max normalise each attribute column according to its weight.

    weight 0: lower raw values score higher (``1 - normalised``);
    weight 1: higher raw values score higher (``normalised``).
    A constant column (max == min) scores 1 for weight 0 and 0 for weight 1.

    Args:
        data_lists: one list of raw values per attribute.
        weights: one weight (0 or 1) per attribute.

    Returns:
        One list of scores in [0, 1] per attribute.

    Raises:
        ValueError: if any weight is neither 0 nor 1.
    """
    # fix: the obfuscated original had a duplicate-parameter SyntaxError and
    # zipped the same argument with itself
    score_lists = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def lowercase( UpperCamelCase_ ) -> list[float]:
    """Sum the per-attribute score columns into one final score per candidate.

    Args:
        UpperCamelCase_: score lists, one list per attribute, all equal length.

    Returns:
        Element-wise sums: ``final[j] = sum(column[j] for each attribute)``.
    """
    # fix: the obfuscated original referenced the undefined names
    # ``score_lists``/``final_scores`` instead of its parameter and accumulator
    score_lists = UpperCamelCase_
    final_scores: list[float] = [0 for _ in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> list[list[float]]:
    """Score candidates by weighted min-max proximity (procentual proximity).

    Intended flow: transpose the source rows into attribute columns, min-max
    score each column per its weight, sum the columns into one final score per
    candidate, and append that score to each source row.

    NOTE(review): mechanical renaming damage — both parameters share one name
    (a SyntaxError), ``get_data`` / ``calculate_each_score`` /
    ``generate_final_scores`` are not defined under those names in this file,
    and ``source_data`` is unbound in the loop below.
    """
    UpperCamelCase = get_data(UpperCamelCase_ )
    UpperCamelCase = calculate_each_score(UpperCamelCase_ , UpperCamelCase_ )
    UpperCamelCase = generate_final_scores(UpperCamelCase_ )
    # append scores to source data
    for i, ele in enumerate(UpperCamelCase_ ):
        source_data[i].append(UpperCamelCase_ )
    return source_data
def lowercase( neighbours , colored_vertices , color ) -> bool:
    """Return True iff *color* may be assigned to a vertex with this adjacency row.

    Args:
        neighbours: adjacency-matrix row of the vertex (1 = edge, 0 = no edge).
        colored_vertices: current color per vertex (-1 = uncolored).
        color: candidate color for this vertex.

    Returns:
        True when no adjacent vertex already uses ``color``.
    """
    # fix: the obfuscated original gave all three parameters the same name
    # (a SyntaxError) and referenced them under their undefined intended names
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def lowercase( graph , max_colors , colored_vertices , index ) -> bool:
    """Backtracking helper: color vertices ``index..n-1`` with ``max_colors`` colors.

    Args:
        graph: adjacency matrix (``graph[i][j] == 1`` iff edge i-j).
        max_colors: number of available colors (values 0..max_colors-1).
        colored_vertices: partial coloring, mutated in place (-1 = uncolored).
        index: first vertex that still needs a color.

    Returns:
        True if a full valid coloring was found (left in ``colored_vertices``);
        False otherwise (``colored_vertices`` restored for indices >= index).
    """
    # the obfuscated original called an external ``valid_coloring`` that this
    # file no longer defines; the check is inlined so the block is self-contained
    def _valid_coloring(neighbours , color ):
        # no colored neighbour may already use ``color``
        return not any(
            neighbour == 1 and colored_vertices[i] == color
            for i, neighbour in enumerate(neighbours ) )

    # Base Case: every vertex is colored
    if index == len(graph ):
        return True
    # Recursive Step: try each color on the current vertex
    for c in range(max_colors ):
        if _valid_coloring(graph[index] , c ):
            # Color current vertex
            colored_vertices[index] = c
            # Validate coloring of the remaining vertices
            if lowercase(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def lowercase( graph , max_colors ) -> list[int]:
    """Solve graph m-coloring via backtracking.

    Args:
        graph: adjacency matrix (``graph[i][j] == 1`` iff edge i-j).
        max_colors: number of available colors (values 0..max_colors-1).

    Returns:
        A valid coloring (one color index per vertex), or ``[]`` if none exists.
    """
    colored_vertices = [-1] * len(graph )

    # the obfuscated original delegated to sibling helpers this file no longer
    # defines under those names; the backtracking is inlined so the entry point
    # is self-contained while keeping the same signature and results
    def _valid(neighbours , color ):
        return not any(
            neighbour == 1 and colored_vertices[i] == color
            for i, neighbour in enumerate(neighbours ) )

    def _solve(index ):
        if index == len(graph ):
            return True
        for c in range(max_colors ):
            if _valid(graph[index] , c ):
                colored_vertices[index] = c
                if _solve(index + 1 ):
                    return True
                colored_vertices[index] = -1
        return False

    if _solve(0 ):
        return colored_vertices
    return []
| 343 | 1 |
# Lazy-import scaffolding for the MaskFormer model family: heavy submodules are
# imported only on attribute access (via _LazyModule) or under TYPE_CHECKING.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# NOTE(review): renaming damage — every name in this file was collapsed to
# ``_SCREAMING_SNAKE_CASE``, so the later assignments clobber the earlier ones
# and ``_LazyModule`` receives the wrong structure. The intended layout is one
# ``_import_structure`` dict updated per optional-dependency branch.
_SCREAMING_SNAKE_CASE = {
    """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
    """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
# vision-only members (feature extractor / image processor)
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = ["""MaskFormerFeatureExtractor"""]
    _SCREAMING_SNAKE_CASE = ["""MaskFormerImageProcessor"""]
# torch-only members (modeling classes)
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MaskFormerForInstanceSegmentation""",
        """MaskFormerModel""",
        """MaskFormerPreTrainedModel""",
    ]
    _SCREAMING_SNAKE_CASE = [
        """MaskFormerSwinBackbone""",
        """MaskFormerSwinModel""",
        """MaskFormerSwinPreTrainedModel""",
    ]
# under static type checking, perform the real imports so annotations resolve
if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )
else:
    # at runtime, replace this module with a lazy proxy
    import sys
    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 343 | import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_SCREAMING_SNAKE_CASE = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase( ):
    """Return a mapping from every byte value (0-255) to a printable unicode char.

    Printable, non-space bytes map to themselves; the remaining bytes are
    shifted to code points starting at 256, so the BPE vocabulary never
    contains whitespace or control characters. Cached: the table is constant.

    Returns:
        dict[int, str] covering all 256 byte values.
    """
    # fix: the obfuscated original bound every intermediate to the same
    # throwaway local and appended an undefined name; reconstructed per the
    # referenced BART implementation
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            # shift non-printable bytes past the 8-bit range
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def lowercase( UpperCamelCase_ ):
    """Return the set of adjacent symbol pairs in a word.

    Args:
        UpperCamelCase_: a word represented as a sequence of symbols
            (variable-length strings once BPE merges have been applied).

    Returns:
        set of ``(left_symbol, right_symbol)`` tuples.
    """
    # fix: the obfuscated original bound everything to one throwaway local and
    # referenced the undefined names ``word``/``pairs``/``prev_char``
    word = UpperCamelCase_
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE_(PreTrainedTokenizer):
    """
    LED tokenizer: byte-level Byte-Pair-Encoding, identical to the BART
    tokenizer. The obfuscated original collapsed all method, attribute and
    parameter names into single placeholders (a SyntaxError via duplicate
    parameter names); the canonical names are restored here, grounded by the
    in-class call sites (`self.bpe`, `super().get_special_tokens_mask`,
    `super()._pad`).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        # Wrap plain-string special tokens so they are not split by the BPE.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line of merges.txt is a version header; last entry is empty.
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single pre-tokenized `token`.

        Repeatedly merges the lowest-ranked adjacent pair until no known
        merge remains; the result (space-joined sub-tokens) is memoized.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Best (lowest-rank) mergeable pair; unknown pairs rank +inf.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level escapes back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write vocab.json and merges.txt into `save_directory`; returns the paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Emit merges in rank order; warn if ranks are not consecutive.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!"""
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Add special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """LED (like BART) does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE'd like mid-sentence words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        """Pad as usual, then extend `global_attention_mask` to the padded length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
# ----- boundary between concatenated source files (stray dataset columns removed) -----
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the dump collapsed this name (conventionally
# `logger`) and the archive map below (conventionally
# `AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`) into one
# placeholder; the second assignment shadows the first.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Checkpoint name -> config URL for the published AST model.
_SCREAMING_SNAKE_CASE = {
    """MIT/ast-finetuned-audioset-10-10-0.4593""": (
        """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
    ),
}
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """
    Configuration for the Audio Spectrogram Transformer (AST) model.

    The obfuscated original had duplicate parameter names (a SyntaxError) and
    dropped the `self.` prefix on every attribute assignment; both are
    restored here. The base class is `PretrainedConfig`, grounded by the
    import at the top of this chunk.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Spectrogram patching parameters.
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_SCREAMING_SNAKE_CASE = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
# Picks two random indices into the sorted-hands table and derives the
# expected comparison outcome from their order (higher index = stronger hand).
# NOTE(review): obfuscation collapsed distinct identifiers — `play`, `oppo`,
# `SORTED_HANDS`, `hand`, `other`, `expected` are unresolved here, and the
# argument of len() was lost; the original indexed the SORTED_HANDS tuple.
def lowercase( ) -> Dict:
    '''simple docstring'''
    UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) )
    UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
# Yields `n` random (hand, other, expected) triples for the parametrized tests.
# NOTE(review): `generate_random_hand` is unresolved — the previous definition
# was renamed to `lowercase` by the dump.
def lowercase( UpperCamelCase_ = 100 ) -> List[Any]:
    '''simple docstring'''
    return (generate_random_hand() for _ in range(UpperCamelCase_ ))
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = PokerHand(UpperCamelCase_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
# Shuffles the known-sorted hand list and verifies sorting restores the order.
# NOTE(review): `SORTED_HANDS`, `poker_hands` and the sort/shuffle targets are
# unresolved — the dump collapsed them into the UpperCamelCase placeholders.
def lowercase( ) -> Dict:
    '''simple docstring'''
    UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS]
    UpperCamelCase = poker_hands.copy()
    shuffle(UpperCamelCase_ )
    UpperCamelCase = chain(sorted(UpperCamelCase_ ) )
    for index, hand in enumerate(UpperCamelCase_ ):
        assert hand == poker_hands[index]
# Verifies a five-high (low-ace) straight sorts below a six-high straight.
# NOTE(review): the reverse= argument was collapsed (originally reverse=True).
def lowercase( ) -> Union[str, Any]:
    '''simple docstring'''
    # Test that five high straights are compared correctly.
    UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=UpperCamelCase_ )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
# Regression test: repeated _is_five_high_straight() calls must not mutate
# the card-value list after the first call.
# NOTE(review): `pokerhand`, `expected`, `expected_card_values` were collapsed
# into one placeholder by the dump.
def lowercase( ) -> str:
    '''simple docstring'''
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" )
    UpperCamelCase = True
    UpperCamelCase = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
# Project Euler problem 54: count player-one wins over the 1000 hands in
# poker_hands.txt; the known answer is 376.
# NOTE(review): `answer`, the file path pieces, `player`/`other` and `output`
# were collapsed into placeholders; os.path.dirname originally took __file__.
def lowercase( ) -> int:
    '''simple docstring'''
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    UpperCamelCase = 0
    UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
    UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" )
    with open(UpperCamelCase_ ) as file_hand:
        for line in file_hand:
            UpperCamelCase = line[:14].strip()
            UpperCamelCase = line[15:].strip()
            UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ )
            UpperCamelCase = player.compare_with(UpperCamelCase_ )
            if output == "Win":
                answer += 1
    assert answer == 376
# ----- boundary between concatenated source files (stray dataset columns removed) -----
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place using pigeonhole sort and return it.

    Places each value in a "hole" indexed by its offset from the minimum,
    counting duplicates, then writes the holes back in order. Runs in
    O(n + range) time and O(range) extra space. The function name and the
    `array` parameter are restored from the in-file references (the dump had
    collapsed every local into one placeholder, leaving dangling names).

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
# Script entry point: run the doctests, then sort a comma-separated list
# typed by the user.
# NOTE(review): `user_input` and `unsorted` are unresolved — both assignments
# were collapsed into _SCREAMING_SNAKE_CASE by the dump, and `pigeon_sort`
# was renamed to `lowercase` above, so this block cannot run as written.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _SCREAMING_SNAKE_CASE = input("""Enter numbers separated by comma:\n""")
    _SCREAMING_SNAKE_CASE = [int(x) for x in user_input.split(""",""")]
    print(pigeon_sort(unsorted))
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (referenced by the max_position_embeddings property below).
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL map for the published XLNet models.
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """
    Configuration for XLNet models.

    The obfuscated original collapsed every parameter into one name (a
    SyntaxError) and dropped the `self.` prefix on attribute assignments;
    both are restored here. Base class grounded by the
    `from ...configuration_utils import PretrainedConfig` import.
    """

    model_type = "xlnet"
    # `mems` is cached state, not a model output to compare at inference.
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        # The head dimension is derived, so d_model must divide evenly.
        if d_model % n_head != 0:
            raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})"""
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        # `use_cache` was renamed to `use_mems_eval`; honor the old kwarg.
        if "use_cache" in kwargs:
            warnings.warn(
                """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
                """ instead.""",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """XLNet has no fixed sequence-length limit; report -1."""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Setting a length limit is meaningless for this architecture.
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit."""
        )
# ----- boundary between concatenated source files (stray dataset columns removed) -----
def largest_square_area_in_matrix_top_down(rows, cols, mat) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Plain top-down recursion (exponential time, no memoization). Parameter
    names `rows`/`cols`/`mat` are restored from the references inside the
    body; the obfuscated original had duplicate parameter names (a
    SyntaxError) and collapsed the three recursive results into one name.
    """

    def update_area_of_max_square(row, col) -> int:
        # BASE CASE: off the bottom or right edge contributes nothing.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Square ending here extends the smallest of its three neighbors.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # One-element list so the nested function can update the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows, cols, mat) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Same recursion as the plain top-down version, memoized in `dp_array`
    (-1 marks an uncomputed cell), giving O(rows * cols) time.
    """

    def update_area_of_max_square_using_dp_array(row, col, dp_array) -> int:
        if row >= rows or col >= cols:
            return 0
        # Memo hit: this subproblem was already solved.
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows, cols, mat) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Bottom-up dynamic programming over a (rows+1) x (cols+1) table whose
    extra row/column provide zero borders. O(rows * cols) time and space.
    The function name is restored from the __main__ call site in this file.
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    # Sweep from the bottom-right corner so neighbors are already computed.
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows, cols, mat) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Space-optimized bottom-up DP keeping only the current row and the row
    below it: O(cols) extra space.

    Bug fix: the original did `next_row = current_row`, aliasing the two
    buffers, so the "diagonal" read picked up the freshly written value of
    the current row instead of the previous row's value (e.g. it reported 2
    for [[1, 1], [1, 0]]). A copy is taken instead.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias) so writes to current_row cannot corrupt next_row.
        next_row = current_row[:]

    return largest_square_area
# Script entry point: run the doctests, then print the answer for a 2x2
# all-ones matrix (expected: 2).
# NOTE(review): `largest_square_area_in_matrix_bottom_up` is unresolved in the
# dump because all four implementations above were renamed to `lowercase`.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def lowerCamelCase_ ( self : Any ):
        """Exercise local-vs-remote resolution for a registered dynamic extractor.

        With a local class registered for the same model type, loading without
        ``trust_remote_code`` (or with it disabled) must return the local
        class (``is_local`` set), while enabling remote code must fetch the
        Hub implementation (no ``is_local`` attribute).

        NOTE(review): mangled identifiers — the nested class and several
        arguments are named ``lowerCamelCase_``/``__lowerCAmelCase`` and the
        loaded extractor is read as ``feature_extractor`` without being bound;
        restore the original names before running.
        """
        class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
            # Marker attribute used to distinguish the local implementation.
            __lowerCAmelCase = True
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # If remote code is not set, the default is to use local
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
        finally:
            # Clean up the global registries regardless of test outcome.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 343 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Speech-to-image diffusion pipeline.

    Transcribes an audio clip with a Whisper model, then uses the transcription
    as the text prompt for a Stable-Diffusion-style loop: CLIP text encoding,
    classifier-free-guided UNet denoising, and VAE decoding to an image.

    NOTE(review): this block was machine-mangled. ``__init__`` and ``__call__``
    declare every parameter under the single name ``lowerCamelCase_`` (a
    SyntaxError in Python) and most assignment targets were collapsed to
    ``UpperCamelCase`` while the bodies still read the intended names
    (``slice_size``, ``batch_size``, ``text_embeddings``, ``latents``...).
    The documentation below describes the intent recoverable from those
    surviving reads; restore the real identifiers before executing.
    """
    def __init__( self : List[Any] , lowerCamelCase_ : WhisperForConditionalGeneration , lowerCamelCase_ : WhisperProcessor , lowerCamelCase_ : AutoencoderKL , lowerCamelCase_ : CLIPTextModel , lowerCamelCase_ : CLIPTokenizer , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ : StableDiffusionSafetyChecker , lowerCamelCase_ : CLIPImageProcessor , ):
        """Register all sub-models with the DiffusionPipeline machinery.

        Warns (does not fail) when the safety checker is disabled.
        """
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            speech_model=lowerCamelCase_ , speech_processor=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , )
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention; "auto" halves the UNet attention head dim."""
        if slice_size == "auto":
            UpperCamelCase = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Disable attention slicing (delegates with slice_size=None)."""
        self.enable_attention_slicing(lowerCamelCase_ )
    @torch.no_grad()
    def __call__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=1_6000 , lowerCamelCase_ : int = 512 , lowerCamelCase_ : int = 512 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Any , ):
        """Run transcription + text-conditioned denoising and return images.

        Pipeline stages: (1) Whisper transcribes the audio into the prompt;
        (2) the prompt is CLIP-tokenized/encoded (with truncation warning);
        (3) optional classifier-free guidance builds unconditional
        embeddings; (4) latents are initialized and denoised by the
        scheduler/UNet loop; (5) the VAE decodes latents to images.
        """
        # --- 1. speech -> text prompt via Whisper -------------------------
        UpperCamelCase = self.speech_processor.feature_extractor(
            lowerCamelCase_ , return_tensors="""pt""" , sampling_rate=lowerCamelCase_ ).input_features.to(self.device )
        UpperCamelCase = self.speech_model.generate(lowerCamelCase_ , max_length=48_0000 )
        UpperCamelCase = self.speech_processor.tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , normalize=lowerCamelCase_ )[
            0
        ]
        # --- 2. validate prompt / size / callback arguments ---------------
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            UpperCamelCase = 1
        elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            UpperCamelCase = len(lowerCamelCase_ )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(lowerCamelCase_ )}.""" )
        # get prompt text embeddings
        UpperCamelCase = self.tokenizer(
            lowerCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        UpperCamelCase = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # CLIP cannot encode beyond model_max_length; warn about the dropped tail.
            UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        UpperCamelCase , UpperCamelCase , UpperCamelCase = text_embeddings.shape
        UpperCamelCase = text_embeddings.repeat(1 , lowerCamelCase_ , 1 )
        UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase_ , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCamelCase = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCamelCase = 42
            if negative_prompt is None:
                UpperCamelCase = [""""""] * batch_size
            elif type(lowerCamelCase_ ) is not type(lowerCamelCase_ ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_ )} !="""
                    f""" {type(lowerCamelCase_ )}.""" )
            elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                UpperCamelCase = [negative_prompt]
            elif batch_size != len(lowerCamelCase_ ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_ )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                UpperCamelCase = negative_prompt
            UpperCamelCase = text_input_ids.shape[-1]
            UpperCamelCase = self.tokenizer(
                lowerCamelCase_ , padding="""max_length""" , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors="""pt""" , )
            UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCamelCase = uncond_embeddings.shape[1]
            UpperCamelCase = uncond_embeddings.repeat(1 , lowerCamelCase_ , 1 )
            UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase_ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        UpperCamelCase = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                UpperCamelCase = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device="""cpu""" , dtype=lowerCamelCase_ ).to(
                    self.device )
            else:
                UpperCamelCase = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            UpperCamelCase = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(lowerCamelCase_ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        UpperCamelCase = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCamelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase = {}
        if accepts_eta:
            UpperCamelCase = eta
        # --- 4. denoising loop --------------------------------------------
        for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCamelCase = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
            # predict the noise residual
            UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ ).sample
            # perform guidance
            if do_classifier_free_guidance:
                UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 )
                UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        # --- 5. decode latents to images (1/0.18215 is the SD VAE scaling) -
        UpperCamelCase = 1 / 0.1_8_2_1_5 * latents
        UpperCamelCase = self.vae.decode(lowerCamelCase_ ).sample
        UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCamelCase = self.numpy_to_pil(lowerCamelCase_ )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=lowerCamelCase_ , nsfw_content_detected=lowerCamelCase_ )
| 343 | import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase( UpperCamelCase_ ) -> bool:
    """Return True if the Unicode code point ``UpperCamelCase_`` is a CJK ideograph.

    This defines a "chinese character" as anything in the CJK Unicode block:
      https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)

    Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    despite its name. The modern Korean Hangul alphabet is a different block,
    as is Japanese Hiragana and Katakana. Those alphabets are used to write
    space-separated words, so they are not treated specially and handled
    like all of the other languages.
    """
    # Fix: the original body read ``cp`` while the parameter had been renamed
    # to ``UpperCamelCase_``, which raised NameError on every call.
    cp = UpperCamelCase_
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF)  # CJK Unified Ideographs
        or (cp >= 0X3400 and cp <= 0X4DBF)  # Extension A
        or (cp >= 0X2_0000 and cp <= 0X2_A6DF)  # Extension B
        or (cp >= 0X2_A700 and cp <= 0X2_B73F)  # Extension C
        or (cp >= 0X2_B740 and cp <= 0X2_B81F)  # Extension D
        or (cp >= 0X2_B820 and cp <= 0X2_CEAF)  # Extension E
        or (cp >= 0XF900 and cp <= 0XFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0X2_F800 and cp <= 0X2_FA1F)  # Compatibility Supplement
    ):
        return True
    return False
def lowercase( UpperCamelCase_ ) -> int:
    """Return 1 when every character of the word is a CJK ideograph, else 0.

    Handles words like '180' or '身高' or '神'.  Delegates the per-codepoint
    test to ``_is_chinese_char`` (the CJK-block predicate defined in the
    upstream script; bound to a different name in this mangled module —
    TODO confirm the binding before running).
    """
    # Fix: the original body iterated an undefined name ``word`` and passed
    # the whole word (not its codepoint) to the predicate.
    for char in UpperCamelCase_:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def lowercase( UpperCamelCase_ ) -> List[str]:
    """Return the distinct tokens that are multi-character Chinese words.

    A token qualifies when it is longer than one character and the
    whole-word predicate ``is_chinese`` accepts it (short-circuited, so
    single-character tokens never reach the predicate).
    """
    # Fix: the original body iterated an undefined name ``tokens`` and lost
    # the ``word_set``/``chinese_word`` bindings to the mangler.
    word_set = set()
    for token in UpperCamelCase_:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    return list(word_set)
def lowercase( bert_tokens , chinese_word_set ) -> List[str]:
    """Mark WordPiece continuations of whole Chinese words with '##'.

    Scans ``bert_tokens`` left to right; at each Chinese character it tries
    the longest match against ``chinese_word_set`` and prefixes every
    non-initial character of a matched word with '##' (whole-word-masking
    convention).  Mutates and returns ``bert_tokens``.

    Fix: the original signature declared both parameters as the same name
    ``UpperCamelCase_`` — a SyntaxError — and the body's bindings were
    mangled; names here are restored from the surviving body reads.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            # Longest-match first, down to 2-character words.
            max_match_len = min(end - start , max_word_len )
            for i in range(max_match_len , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowercase( lines , ltp_tokenizer , bert_tokenizer ) -> List[List[int]]:
    """Compute whole-word-masking reference positions for each input line.

    For every line: segment with LTP to collect Chinese words, tokenize with
    the BERT tokenizer, re-mark sub-word continuations with '##', and record
    the indices of '##'-prefixed single Chinese characters.  Returns one
    index list per line.

    Fix: the original signature declared all three parameters as the same
    name ``UpperCamelCase_`` — a SyntaxError — and most bindings were
    mangled; names are restored from the surviving body reads.  Depends on
    ``get_chinese_word`` / ``add_sub_symbol`` / ``_is_chinese_char`` from
    the upstream script (bound to different names in this mangled module —
    TODO confirm the bindings).
    """
    ltp_res = []
    # LTP segmentation in batches of 100 lines to bound memory.
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def lowercase( args ) -> None:
    """Script entry point: read lines, compute WWM reference ids, write JSON lines.

    For Chinese (Ro)BERT, the best result is from: RoBERTa-wwm-ext
    (https://github.com/ymcui/Chinese-BERT-wwm).  To fine-tune those models
    the same LTP tokenizer (https://github.com/HIT-SCIR/ltp) must be used.

    Fix: the original body read ``args`` while the parameter had been
    renamed ``UpperCamelCase_``, and intermediate bindings were mangled.
    ``prepare_ref`` is the upstream name of the reference builder defined
    above — TODO confirm the binding in this mangled module.
    """
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    # Drop empty / whitespace-only lines to avoid delimiters like '\u2029'.
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    # NOTE(review): the assignment targets in this guard were mangled — the
    # parser is bound to ``_SCREAMING_SNAKE_CASE`` but read as ``parser``
    # below, and the final call invokes ``main``/``args`` which are never
    # defined in this module; restore the original names before running.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
    )
    parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
    parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    main(args)
| 343 | 1 |
from __future__ import annotations
_SCREAMING_SNAKE_CASE = 1.6021E-19  # units = C
# Readable alias used by the solver below (the body read ``ELECTRON_CHARGE``
# while the constant had been renamed during mangling).
ELECTRON_CHARGE = _SCREAMING_SNAKE_CASE


def lowercase( conductivity , electron_conc , mobility ) -> tuple[str, float]:
    """Solve sigma = n * q * mu for whichever of the three quantities is zero.

    Exactly one of ``conductivity`` (sigma), ``electron_conc`` (n) and
    ``mobility`` (mu) must be 0; that unknown is computed from the other two
    and returned as a ``(name, value)`` tuple.

    Fix: the original signature declared all three parameters as the same
    name ``UpperCamelCase_`` — a SyntaxError — while the body read the real
    names restored here.

    Raises:
        ValueError: if the number of zero arguments is not exactly one, or
            if any argument is negative.
    """
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError("""You cannot supply more or less than 2 values""" )
    elif conductivity < 0:
        raise ValueError("""Conductivity cannot be negative""" )
    elif electron_conc < 0:
        raise ValueError("""Electron concentration cannot be negative""" )
    elif mobility < 0:
        raise ValueError("""mobility cannot be negative""" )
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 343 | import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Tester harness that builds small Flax ViT configs/inputs and checks shapes.

    NOTE(review): this block was machine-mangled — ``__init__`` and the
    two ``create_and_check`` methods declare duplicate parameters named
    ``lowerCamelCase_`` (a SyntaxError) while the bodies read the real
    names (``parent``, ``batch_size``, ``config``, ``pixel_values``...).
    Restore the identifiers before running.
    """
    def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ):
        """Store hyper-parameters and derive the expected sequence length."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = patch_size
        UpperCamelCase = num_channels
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCamelCase = (image_size // patch_size) ** 2
        UpperCamelCase = num_patches + 1
    def lowerCamelCase_ ( self : str ):
        """Build a random pixel batch and a matching ViTConfig."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
        return config, pixel_values
    def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ):
        """Run FlaxViTModel and check the last_hidden_state shape."""
        UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        UpperCamelCase = (self.image_size, self.image_size)
        UpperCamelCase = (self.patch_size, self.patch_size)
        UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
        """Run FlaxViTForImageClassification; also smoke-tests greyscale input."""
        UpperCamelCase = self.type_sequence_label_size
        UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase = 1
        UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ )
        UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase = model(lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """Return (config, inputs_dict) for the common test mixin."""
        UpperCamelCase = self.prepare_config_and_inputs()
        # NOTE(review): the multi-target unpack below lost its names to the
        # mangler; upstream this is ``config, pixel_values = config_and_inputs``.
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Flax ViT model test-case: config tests, shape checks, JIT parity, slow load.

    NOTE(review): mangled identifiers — the tester/config-tester bindings
    are collapsed to ``UpperCamelCase`` and arguments to ``lowerCamelCase_``;
    restore the names before running.
    """
    # Model classes exercised by the common mixin (empty when flax is absent).
    __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def lowerCamelCase_ ( self : Optional[int] ):
        """Create the model tester and the ConfigTester."""
        UpperCamelCase = FlaxViTModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
    def lowerCamelCase_ ( self : List[Any] ):
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Shape-check the base model."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Shape-check the image-classification head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Any ):
        """Check the forward signature starts with ``pixel_values``."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
    def lowerCamelCase_ ( self : str ):
        """Assert jitted and non-jitted forward passes agree in output shapes."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
                UpperCamelCase = model_class(lowerCamelCase_ )
                @jax.jit
                def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ):
                    return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ )
                with self.subTest("""JIT Enabled""" ):
                    UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
                self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
                for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Load the pretrained checkpoint from the Hub and run a dummy forward."""
        for model_class_name in self.all_model_classes:
            UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
            UpperCamelCase = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(lowerCamelCase_ )
| 343 | 1 |
from collections.abc import Callable
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] , lowerCamelCase_ : Callable | None = None ):
"""simple docstring"""
UpperCamelCase = []
# Stores indexes of each item for supporting updates and deletion.
UpperCamelCase = {}
# Stores current size of heap.
UpperCamelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
UpperCamelCase = key or (lambda lowerCamelCase_ : x)
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int ):
"""simple docstring"""
return int((i - 1) / 2 ) if i > 0 else None
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCamelCase_ ( self : str , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCamelCase , UpperCamelCase = self.arr[j], self.arr[i]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
return self.arr[i][1] < self.arr[j][1]
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self._left(lowerCamelCase_ )
UpperCamelCase = self._right(lowerCamelCase_ )
UpperCamelCase = i
if left is not None and not self._cmp(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase = left
if right is not None and not self._cmp(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase = right
return valid_parent
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self._parent(lowerCamelCase_ )
while parent is not None and not self._cmp(lowerCamelCase_ , lowerCamelCase_ ):
self._swap(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase , UpperCamelCase = parent, self._parent(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self._get_valid_parent(lowerCamelCase_ )
while valid_parent != index:
self._swap(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase , UpperCamelCase = valid_parent, self._get_valid_parent(lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
if item not in self.pos_map:
return
UpperCamelCase = self.pos_map[item]
UpperCamelCase = [item, self.key(lowerCamelCase_ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowerCamelCase_ )
self._heapify_down(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ):
"""simple docstring"""
if item not in self.pos_map:
return
UpperCamelCase = self.pos_map[item]
del self.pos_map[item]
UpperCamelCase = self.arr[self.size - 1]
UpperCamelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowerCamelCase_ )
self._heapify_down(lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowerCamelCase_ )] )
else:
UpperCamelCase = [item, self.key(lowerCamelCase_ )]
UpperCamelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return self.arr[0] if self.size else None
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowercase( ) -> None:
    '''No-op placeholder.

    NOTE(review): upstream this slot held a demo/doctest routine for the
    heap class above; the body was stripped during mangling, leaving only
    this docstring.
    '''
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 343 | import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = range_bbox
    def lowerCamelCase_ ( self : Dict ):
        """Build random ids, legal bounding boxes, masks, labels and a config.

        Bounding boxes are post-processed so that ``x1 <= x2`` and
        ``y1 <= y2`` always hold (coordinates are swapped when out of order).

        NOTE(review): mangled identifiers — every assignment target below is
        ``UpperCamelCase``; upstream these are ``input_ids``, ``bbox``,
        ``input_mask``, ``token_type_ids``, ``sequence_labels``,
        ``token_labels`` and ``config``, matching the returned tuple.
        """
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap y-coordinates when y2 < y1.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCamelCase = bbox[i, j, 3]
                    UpperCamelCase = bbox[i, j, 1]
                    UpperCamelCase = t
                # Swap x-coordinates when x2 < x1.
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCamelCase = bbox[i, j, 2]
                    UpperCamelCase = bbox[i, j, 0]
                    UpperCamelCase = t
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = LiltModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = config_and_inputs
UpperCamelCase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    # NOTE(review): the three `__lowerCAmelCase` bases are obfuscated,
    # unresolved names (presumably the usual model/pipeline tester mixins —
    # verify against the original file).  Every class attribute below is
    # assigned to the same mangled name so later assignments clobber earlier
    # ones, and every method is named `lowerCamelCase_`, so only the last
    # method definition survives on the class.  Several method bodies also
    # assign results to a throwaway local and reference names that were never
    # bound (obfuscation damage) — flagged inline.
    __lowerCAmelCase = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __lowerCAmelCase = (
        {
            """feature-extraction""": LiltModel,
            """question-answering""": LiltForQuestionAnswering,
            """text-classification""": LiltForSequenceClassification,
            """token-classification""": LiltForTokenClassification,
            """zero-shot""": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    # NOTE(review): the signature below repeats one obfuscated parameter name
    # (a SyntaxError as written).
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
        """Unconditionally skip pipeline tests for this model family."""
        return True
    def lowerCamelCase_ ( self : List[Any] ):
        """Set-up hook; should presumably store the tester/config-tester on
        `self` (the assignments below are discarded) — verify."""
        UpperCamelCase = LiltModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
    def lowerCamelCase_ ( self : Any ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self : Tuple ):
        """Forward-pass shape test for the base model."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """Repeat the base-model test for each position-embedding flavour."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase = type
            self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Token-classification head test."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Question-answering head test."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
    @slow
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Smoke-test loading the published checkpoints."""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def lowerCamelCase_ ( self : List[str] ):
        """Integration check: run the pretrained LiLT base model on a tiny
        input and compare the leading hidden states against golden values.

        NOTE(review): `.to(lowerCamelCase_)` and the `device=lowerCamelCase_`
        keyword arguments reference an undefined name — presumably
        `torch_device` (imported at the top of the file); likewise every
        intermediate result is assigned to a throwaway local while later
        lines expect names such as `model`/`outputs` — obfuscation damage.
        """
        UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ )
        UpperCamelCase = torch.Size([1, 2, 768] )
        UpperCamelCase = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , )
        self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
| 343 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_SCREAMING_SNAKE_CASE = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Tokenized HumanEval prompts, each yielded `n_copies` times.

    NOTE(review): the base class name `__lowerCAmelCase` is an obfuscated,
    unresolved reference (presumably torch.utils.data.IterableDataset, which
    is imported above — verify).
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        # NOTE(review): parameter names reconstructed from the attribute reads
        # in __iter__ and from the keyword arguments at the construction site
        # (n_copies=..., n_tasks=...); the original signature repeated one
        # obfuscated name (a SyntaxError) and bound to locals instead of self.
        self.tokenizer = tokenizer
        self.dataset = dataset
        # default to one entry per dataset row when n_tasks is not given
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        """Tokenize every prompt once, then yield each task `n_copies` times."""
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        # padding flag was obfuscated in the original; True matches the
        # batch-tokenization usage here — TODO confirm.
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Stopping criterion: halt generation once every sequence in the batch
    contains one of the end-of-function strings after the prompt.

    NOTE(review): the base class name `__lowerCAmelCase` is an obfuscated,
    unresolved reference (presumably transformers.StoppingCriteria, imported
    above — verify).
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        # NOTE(review): parameter names reconstructed from the attribute reads
        # in __call__; the original signature repeated one obfuscated name
        # (a SyntaxError) and bound to locals instead of self.
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when all generated continuations contain a stop string.

        Only the tokens after `start_length` (the prompt) are decoded.
        Parameter names follow the StoppingCriteria call protocol.
        """
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def lowercase( UpperCamelCase_ , stop_tokens=None ) -> str:
    """Truncate generated code at the last stop token.

    Splits the text on the stop tokens (keeping the separators thanks to the
    capturing group) and drops the final separator together with everything
    after it.

    Args:
        UpperCamelCase_: the generated code string.
        stop_tokens: strings that delimit code blocks; defaults to the
            module-level EOF list.  The parameter is new (backward compatible)
            so the function no longer has to rely on module state.
    """
    if stop_tokens is None:
        # NOTE(review): in this concatenated file `_SCREAMING_SNAKE_CASE` is
        # reassigned further down; prefer passing `stop_tokens` explicitly.
        stop_tokens = _SCREAMING_SNAKE_CASE
    # The original split the string on its *own* characters
    # ("|".join(<the string>)) instead of on the stop tokens — an obfuscation
    # bug.  re.escape keeps tokens containing regex metacharacters safe.
    string_list = re.split("(%s)" % "|".join(re.escape(token) for token in stop_tokens), UpperCamelCase_)
    # last string should be ""
    return "".join(string_list[:-2])
def lowercase( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    """Generate `batch_size` candidate completions per HumanEval task.

    Returns a list of length `n_tasks`; entry i holds the decoded, truncated
    completions for task i.

    NOTE(review): call sites in this file refer to this function as
    `complete_code`, so the def name here does not match.  The original
    signature repeated one obfuscated parameter name for every argument
    (a SyntaxError); names were reconstructed from the caller's keywords
    (`n_tasks=`, `batch_size=`) and the body's attribute reads.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Reset the stop-criterion offset to the current prompt length.
            # NOTE(review): the original discarded this value into a throwaway
            # local; the target follows the upstream HumanEval script — verify.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, token_sequences in gen_token_dict.items():
        for s in token_sequences:
            # decode flags were obfuscated to an undefined name in the
            # original; True matches the upstream script — TODO confirm.
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            # NOTE(review): `remove_last_block` is the upstream helper name;
            # in this file the helper def above was renamed by obfuscation.
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def lowercase( ) -> str:
    """Entry point: generate HumanEval completions with accelerate and score
    them with the `code_eval` metric, dumping pass@k results to JSON.

    NOTE(review): the guard below calls this as `main()`, but the def was
    renamed by obfuscation.  Throughout the body, results are assigned to a
    single throwaway local while later lines expect the real names
    (`parser`, `args`, `tokenizer`, `accelerator`, ...), and several values
    were replaced by the undefined name `UpperCamelCase_` — flagged inline.
    The `-> str` annotation is inaccurate; nothing is returned.
    """
    # Setup configuration
    # NOTE(review): argument should presumably be HumanEvalArguments
    # (imported above); `UpperCamelCase_` is undefined here.
    UpperCamelCase = HfArgumentParser(UpperCamelCase_ )
    UpperCamelCase = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    # NOTE(review): the two assignments below should presumably target
    # os.environ["HF_ALLOW_CODE_EVAL"] / os.environ["TOKENIZERS_PARALLELISM"];
    # as written the values are discarded.
    UpperCamelCase = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    UpperCamelCase = """false"""
    if args.num_workers is None:
        UpperCamelCase = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    UpperCamelCase = Accelerator()
    # NOTE(review): device_specific expects a bool (upstream: True);
    # `UpperCamelCase_` is undefined.
    set_seed(args.seed , device_specific=UpperCamelCase_ )
    # Load model and tokenizer
    UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
    UpperCamelCase = tokenizer.eos_token
    UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    UpperCamelCase = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCamelCase_ , UpperCamelCase_ )] ),
    }
    # Load evaluation dataset and metric
    UpperCamelCase = load_dataset("""openai_humaneval""" )
    UpperCamelCase = load_metric("""code_eval""" )
    UpperCamelCase = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
    UpperCamelCase = args.n_samples // args.batch_size
    # NOTE(review): the TokenizedDataset/DataLoader/prepare/complete_code calls
    # below all pass the undefined `UpperCamelCase_` where concrete objects
    # (tokenizer, n_copies, n_tasks, the dataset, ...) belong.
    UpperCamelCase = TokenizedDataset(UpperCamelCase_ , human_eval["""test"""] , n_copies=UpperCamelCase_ , n_tasks=UpperCamelCase_ )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    UpperCamelCase = DataLoader(UpperCamelCase_ , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        UpperCamelCase = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] )
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""" )
        raise exception
    UpperCamelCase , UpperCamelCase = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ )
    UpperCamelCase = complete_code(
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , n_tasks=UpperCamelCase_ , batch_size=args.batch_size , **UpperCamelCase_ , )
    if accelerator.is_main_process:
        UpperCamelCase = []
        for task in tqdm(range(UpperCamelCase_ ) ):
            UpperCamelCase = human_eval["""test"""][task]["""test"""]
            UpperCamelCase = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append("""\n""" + test_func + """\n""" + entry_point )
        # Evaluate completions with "code_eval" metric
        UpperCamelCase , UpperCamelCase = code_eval_metric.compute(
            references=UpperCamelCase_ , predictions=UpperCamelCase_ , num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )
        # Save results to json file
        with open(args.output_file , """w""" ) as fp:
            json.dump(UpperCamelCase_ , UpperCamelCase_ )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()  # NOTE(review): `main` is undefined — the entry point above was renamed `lowercase` by obfuscation; verify before running.
| 343 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Holds the tunable parameters for PoolFormer image-processor tests and
    builds the kwargs dict used to construct the image processor."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # NOTE(review): parameter names reconstructed from the attribute reads
        # in prepare_image_processor_dict(); the original signature repeated
        # one obfuscated name for every parameter (a SyntaxError) and bound
        # the values to locals instead of `self`.
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        # mean/std defaults moved out of the signature to avoid mutable
        # default arguments; the effective defaults are unchanged.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the constructor kwargs for the image processor under test.

        Name restored to match the call in the test class below
        (`self.image_processor_tester.prepare_image_processor_dict()`).
        """
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    # NOTE(review): the `__lowerCAmelCase` base is an obfuscated, unresolved
    # name (presumably ImageProcessingSavingTestMixin, imported above).  The
    # class attribute below should presumably be `image_processing_class` —
    # the methods read `self.image_processing_class`, which this mangled name
    # never provides.  Every method is named `lowerCamelCase_` (only the last
    # def survives), and method bodies assign results to a throwaway local
    # while later lines expect names like `image_processor`/`image_processing`/
    # `image_inputs`/`encoded_images` — obfuscation damage throughout.
    __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
    def lowerCamelCase_ ( self : Any ):
        """Set-up hook; should presumably store the tester on
        `self.image_processor_tester` (the property below reads it)."""
        UpperCamelCase = PoolFormerImageProcessingTester(self )
    @property
    def lowerCamelCase_ ( self : int ):
        """Constructor kwargs for the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCamelCase_ ( self : int ):
        """The processor exposes all expected configuration attributes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
    def lowerCamelCase_ ( self : Optional[int] ):
        """from_dict honours defaults and keyword overrides for size/crop."""
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
        self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Intentionally empty placeholder (kept from the original)."""
        pass
    def lowerCamelCase_ ( self : Optional[int] ):
        """Batched/unbatched output shapes for PIL image inputs."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , Image.Image )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Batched/unbatched output shapes for numpy array inputs."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , np.ndarray )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def lowerCamelCase_ ( self : List[str] ):
        """Batched/unbatched output shapes for torch tensor inputs."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 343 | 1 |
from typing import Any
import numpy as np
def lowercase( UpperCamelCase_ ) -> bool:
    """Return True iff the matrix equals its own conjugate transpose
    (i.e. it is Hermitian).

    The original body compared against an undefined name `matrix`; it now
    uses the parameter for both sides.
    """
    return np.array_equal(UpperCamelCase_, UpperCamelCase_.conjugate().T)
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
'''simple docstring'''
UpperCamelCase = v.conjugate().T
UpperCamelCase = v_star.dot(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , np.ndarray )
return (v_star_dot.dot(UpperCamelCase_ )) / (v_star.dot(UpperCamelCase_ ))
def lowercase() -> None:
    """Self-check: verify two Hermitian matrices and one Rayleigh quotient.

    NOTE(review): `is_hermitian` and `rayleigh_quotient` are the upstream
    helper names; in this file those defs above were renamed `lowercase` by
    obfuscation, so these calls are unresolved as written.  Local names were
    restored from the f-string (`{a}`) and call arguments that already used
    them while the assignments went to a throwaway name.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()  # NOTE(review): `tests` is undefined — the self-check above was renamed `lowercase` by obfuscation; verify before running.
def lowercase( mass , velocity ) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body.

    The original signature repeated one obfuscated parameter name (a
    SyntaxError) while the body already read `mass`; names restored.
    abs() makes the result independent of the velocity's sign.

    Raises:
        ValueError: if the mass is negative.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
| 343 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# NOTE(review): every fixture table below is assigned to the same obfuscated
# name `_SCREAMING_SNAKE_CASE`, so each assignment clobbers the previous one
# and the `@pytest.mark.parametrize` decorators further down cannot reference
# them.  Upstream names, in order, are presumably: SORTED_HANDS, TEST_COMPARE,
# TEST_FLUSH, TEST_STRAIGHT, TEST_FIVE_HIGH_STRAIGHT, TEST_KIND, TEST_TYPES —
# TODO confirm against the original module.
# Hands sorted from weakest to strongest.
_SCREAMING_SNAKE_CASE = (
    """4S 3H 2C 7S 5H""",
    """9D 8H 2C 6S 7H""",
    """2D 6D 9D TH 7D""",
    """TC 8C 2S JH 6C""",
    """JH 8S TH AH QH""",
    """TS KS 5S 9S AC""",
    """KD 6S 9D TH AD""",
    """KS 8D 4D 9S 4S""", # pair
    """8C 4S KH JS 4D""", # pair
    """QH 8H KD JH 8S""", # pair
    """KC 4H KS 2H 8D""", # pair
    """KD 4S KC 3H 8S""", # pair
    """AH 8S AS KC JH""", # pair
    """3H 4C 4H 3S 2H""", # 2 pairs
    """5S 5D 2C KH KH""", # 2 pairs
    """3C KH 5D 5S KH""", # 2 pairs
    """AS 3C KH AD KH""", # 2 pairs
    """7C 7S 3S 7H 5S""", # 3 of a kind
    """7C 7S KH 2H 7H""", # 3 of a kind
    """AC KH QH AH AS""", # 3 of a kind
    """2H 4D 3C AS 5S""", # straight (low ace)
    """3C 5C 4C 2C 6H""", # straight
    """6S 8S 7S 5H 9H""", # straight
    """JS QS 9H TS KH""", # straight
    """QC KH TS JS AH""", # straight (high ace)
    """8C 9C 5C 3C TC""", # flush
    """3S 8S 9S 5S KS""", # flush
    """4C 5C 9C 8C KC""", # flush
    """JH 8H AH KH QH""", # flush
    """3D 2H 3H 2C 2D""", # full house
    """2H 2C 3S 3H 3D""", # full house
    """KH KC 3S 3H 3D""", # full house
    """JC 6H JS JD JH""", # 4 of a kind
    """JC 7H JS JD JH""", # 4 of a kind
    """JC KH JS JD JH""", # 4 of a kind
    """2S AS 4S 5S 3S""", # straight flush (low ace)
    """2D 6D 3D 4D 5D""", # straight flush
    """5C 6C 3C 7C 4C""", # straight flush
    """JH 9H TH KH QH""", # straight flush
    """JH AH TH KH QH""", # royal flush (high ace straight flush)
)
# (hand, other, expected result of hand.compare_with(other)) triples.
_SCREAMING_SNAKE_CASE = (
    ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
    ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
    ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
    ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
    ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
    ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
    ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
    ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
    ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
    ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
    ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
    ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
    ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
    ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
    ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
    ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
    ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
    ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
    ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
    ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
    ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
    ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
    ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
    ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
    ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
    ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
    ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
    ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
    ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
    ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
    ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
    ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
    ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
# (hand, is_flush) fixtures.
_SCREAMING_SNAKE_CASE = (
    ("""2H 3H 4H 5H 6H""", True),
    ("""AS AH 2H AD AC""", False),
    ("""2H 3H 5H 6H 7H""", True),
    ("""KS AS TS QS JS""", True),
    ("""8H 9H QS JS TH""", False),
    ("""AS 3S 4S 8S 2S""", True),
)
# (hand, is_straight) fixtures.
_SCREAMING_SNAKE_CASE = (
    ("""2H 3H 4H 5H 6H""", True),
    ("""AS AH 2H AD AC""", False),
    ("""2H 3H 5H 6H 7H""", False),
    ("""KS AS TS QS JS""", True),
    ("""8H 9H QS JS TH""", True),
)
# (hand, is_five_high_straight, expected card values) fixtures.
_SCREAMING_SNAKE_CASE = (
    ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
    ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
    ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
    ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
# (hand, same-kind count) fixtures.
_SCREAMING_SNAKE_CASE = (
    ("""JH AH TH KH QH""", 0),
    ("""JH 9H TH KH QH""", 0),
    ("""JC KH JS JD JH""", 7),
    ("""KH KC 3S 3H 3D""", 6),
    ("""8C 9C 5C 3C TC""", 0),
    ("""JS QS 9H TS KH""", 0),
    ("""7C 7S KH 2H 7H""", 3),
    ("""3C KH 5D 5S KH""", 2),
    ("""QH 8H KD JH 8S""", 1),
    ("""2D 6D 9D TH 7D""", 0),
)
# (hand, hand-type rank) fixtures.
_SCREAMING_SNAKE_CASE = (
    ("""JH AH TH KH QH""", 2_3),
    ("""JH 9H TH KH QH""", 2_2),
    ("""JC KH JS JD JH""", 2_1),
    ("""KH KC 3S 3H 3D""", 2_0),
    ("""8C 9C 5C 3C TC""", 1_9),
    ("""JS QS 9H TS KH""", 1_8),
    ("""7C 7S KH 2H 7H""", 1_7),
    ("""3C KH 5D 5S KH""", 1_6),
    ("""QH 8H KD JH 8S""", 1_5),
    ("""2D 6D 9D TH 7D""", 1_4),
)
def lowercase():
    """Pick two random hands from the sorted fixture and derive the expected
    comparison result from their positions.

    NOTE(review): `SORTED_HANDS` is the upstream fixture name; in this file
    the table above was renamed `_SCREAMING_SNAKE_CASE` (and then clobbered),
    so the reference is unresolved as written.  Local names were restored
    from the later lines that already read them (`play`, `oppo`, `hand`,
    `other`, `expected`) while the assignments went to throwaway names.
    """
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) yields 0/1/2 -> Loss/Tie/Win
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def lowercase( UpperCamelCase_ = 100 ) -> List[Any]:
    """Yield `UpperCamelCase_` random (hand, other, expected) triples.

    NOTE(review): `generate_random_hand` is the upstream helper name; the
    generator above was renamed `lowercase` by obfuscation, so this call is
    unresolved as written.  The annotation is also inaccurate — a generator
    expression is returned, not a list.
    """
    return (generate_random_hand() for _ in range(UpperCamelCase_ ))
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )  # NOTE(review): `UpperCamelCase_` is undefined — should reference the flush fixture table (name clobbered above); verify.
def lowercase(hand, expected):
    """Each hand is (or is not) a flush, per the fixture's expected flag."""
    # Parameter names restored from the parametrize spec "hand, expected";
    # the original def repeated one obfuscated parameter name (a SyntaxError).
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )  # NOTE(review): `UpperCamelCase_` is undefined — should reference the straight fixture table (name clobbered above); verify.
def lowercase(hand, expected):
    """Each hand is (or is not) a straight, per the fixture's expected flag."""
    # Parameter names restored from the parametrize spec "hand, expected";
    # the original def repeated one obfuscated parameter name (a SyntaxError).
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ )  # NOTE(review): `UpperCamelCase_` is undefined — should reference the five-high fixture table (name clobbered above); verify.
def lowercase(hand, expected, card_values):
    """_is_five_high_straight flag and parsed card values match the fixture."""
    # Parameter names restored from the parametrize spec; the original def
    # repeated one obfuscated parameter name (a SyntaxError) and bound the
    # hand to a throwaway local while the asserts read `player`.
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )  # NOTE(review): `UpperCamelCase_` is undefined — should reference the same-kind fixture table (name clobbered above); verify.
def lowercase(hand, expected):
    """Same-kind count matches the fixture's expected value."""
    # Parameter names restored from the parametrize spec "hand, expected";
    # the original def repeated one obfuscated parameter name (a SyntaxError).
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )  # NOTE(review): `UpperCamelCase_` is undefined — should reference the hand-type fixture table (name clobbered above); verify.
def lowercase(hand, expected):
    """Hand-type rank matches the fixture's expected value."""
    # Parameter names restored from the parametrize spec "hand, expected";
    # the original def repeated one obfuscated parameter name (a SyntaxError).
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ )  # NOTE(review): `UpperCamelCase_` is undefined — should reference the compare fixture table (name clobbered above); verify.
def lowercase(hand, other, expected):
    """compare_with returns the expected Win/Tie/Loss result."""
    # Parameter names restored from the parametrize spec "hand, other,
    # expected"; the original def repeated one obfuscated name (SyntaxError).
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )  # NOTE(review): `generate_random_hands` is the upstream helper name; the generator above was renamed `lowercase`, so this is unresolved as written.
def lowercase(hand, other, expected):
    """compare_with agrees with the ordering-derived expectation on random pairs."""
    # Parameter names restored from the parametrize spec "hand, other,
    # expected"; the original def repeated one obfuscated name (SyntaxError).
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def lowercase():
    """Sorting shuffled PokerHand objects reproduces the canonical order.

    NOTE(review): `SORTED_HANDS` is the upstream fixture name; the table above
    was renamed/clobbered by obfuscation, so the reference is unresolved as
    written.  Local names and the comprehension variable were restored from
    the later lines that already read them (`poker_hands` was referenced via
    `.copy()` while the assignment went to a throwaway name, and the
    comprehension built `PokerHand(<undefined>)` instead of using `hand`).
    """
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def lowercase():
    """A six-high straight outranks a five-high (wheel) straight when
    sorting in descending order."""
    # Test that five high straights are compared correctly.
    # NOTE(review): the original passed the undefined name `UpperCamelCase_`
    # to `reverse=`; descending order (reverse=True) is what the assertion on
    # the strongest hand being first requires.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase():
    """_is_five_high_straight stays True on repeated calls and does not
    mutate the card values."""
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    # NOTE(review): the original bound all three values below to the same
    # throwaway name while the loop read `pokerhand`, `expected` and
    # `expected_card_values`; names restored from those reads.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def lowercase():
    """Project Euler #54: player 1 wins exactly 376 of the games listed in
    poker_hands.txt (located next to this file)."""
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    # NOTE(review): local names restored from the reads further down
    # (`answer`, `player`, `output`, ...); the original also passed the
    # undefined name `UpperCamelCase_` to os.path.dirname — `__file__` is the
    # conventional way to locate the data file beside the test module.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    hand_file = os.path.join(script_dir, "poker_hands.txt")
    with open(hand_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 343 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the module logger and the pretrained-config archive map are
# both assigned to the same obfuscated name, so the map clobbers the logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    """microsoft/trocr-base-handwritten""": (
        """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Configuration class for TrOCR decoder models: stores the decoder
    hyper-parameters and forwards the special-token ids to the base class.

    NOTE(review): the base class name `__lowerCAmelCase` is an obfuscated,
    unresolved reference (presumably transformers.PretrainedConfig, imported
    above — verify).
    """

    # NOTE(review): these three class attributes were all assigned to the same
    # obfuscated name (clobbering each other); the names below follow the
    # HF PretrainedConfig convention implied by their values — TODO confirm.
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # NOTE(review): parameter names reconstructed from the assignment
        # order and the attribute_map; the original signature repeated one
        # obfuscated name for every parameter (a SyntaxError) and bound the
        # values to locals instead of `self`.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 343 | 1 |
from numpy import exp, pi, sqrt


def lowercase(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density with mean ``mu`` and
    standard deviation ``sigma`` at ``x``.

    The obfuscated original repeated one parameter name three times (a
    SyntaxError) while the body referenced ``x``, ``mu`` and ``sigma``; the
    intended signature is restored from those references.  The return
    annotation is corrected from ``int`` to ``float``.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# NOTE(review): this archive map was mangled to the same name as the logger
# above, so it clobbers it; upstream these were presumably `logger` and
# `SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP` -- confirm against the original module.
_SCREAMING_SNAKE_CASE = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE_ ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for a Swin Transformer backbone.

    The obfuscated original repeated the parameter name ``lowerCamelCase_``
    (a SyntaxError) and lost every ``self.`` assignment; parameter names and
    attributes are restored from the names referenced in the body and the
    imports at the top of this module (``PretrainedConfig``,
    ``BackboneConfigMixin``, ``get_aligned_output_features_output_indices``).
    Defaults are unchanged.
    """

    model_type = """swin"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SCREAMING_SNAKE_CASE_ ( OnnxConfig ):
    """ONNX export configuration for Swin.

    In the obfuscated original both properties were named ``lowerCamelCase_``,
    so the second definition shadowed the first; the conventional
    ``OnnxConfig`` property names are restored -- confirm against upstream.
    """

    # minimum torch version required for a correct ONNX export of this model
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> "OrderedDict":
        """Named model inputs with their dynamic axes."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE_ ( BaseOutput ):
    """Output container for the prior transformer.

    The obfuscated original had its single dataclass field collapsed to
    ``__lowerCAmelCase = 42``; the field name is restored from the keyword
    used when this class is constructed later in this module
    (``predicted_image_embedding=...``).
    """

    # predicted CLIP image embedding produced by the prior
    predicted_image_embedding: torch.FloatTensor
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
    # NOTE(review): this looks like an obfuscated copy of diffusers'
    # PriorTransformer.  The mangling is destructive: `__init__` repeats the
    # parameter name `lowerCamelCase_` (a SyntaxError), every method is named
    # `lowerCamelCase_` (later definitions shadow earlier ones), and the
    # `UpperCamelCase = ...` statements appear to have replaced a mix of
    # local-variable and `self.<attr> = ...` assignments.  The code is kept
    # byte-identical here; reconstruct against the upstream source before use.
    @register_to_config
    # Constructor: builds time projection/embedding, input/output projections,
    # the transformer block stack and the causal attention mask.
    def __init__( self : int , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 64 , lowerCamelCase_ : int = 20 , lowerCamelCase_ : int = 768 , lowerCamelCase_ : str=77 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : str = "silu" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = "linear" , lowerCamelCase_ : Optional[str] = "prd" , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , ):
        """Set up the prior transformer's sub-modules.

        NOTE(review): duplicate parameter names above make this ``def`` a
        SyntaxError as written; the defaults (32, 64, 20, 768, 77, 4, ...)
        match diffusers' PriorTransformer -- confirm upstream.
        """
        super().__init__()
        UpperCamelCase = num_attention_heads
        UpperCamelCase = attention_head_dim
        UpperCamelCase = num_attention_heads * attention_head_dim
        UpperCamelCase = additional_embeddings
        UpperCamelCase = time_embed_dim or inner_dim
        UpperCamelCase = embedding_proj_dim or embedding_dim
        UpperCamelCase = clip_embed_dim or embedding_dim
        UpperCamelCase = Timesteps(lowerCamelCase_ , lowerCamelCase_ , 0 )
        UpperCamelCase = TimestepEmbedding(lowerCamelCase_ , lowerCamelCase_ , out_dim=lowerCamelCase_ , act_fn=lowerCamelCase_ )
        UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        # optional layer-norm applied to the projected embedding
        if embedding_proj_norm_type is None:
            UpperCamelCase = None
        elif embedding_proj_norm_type == "layer":
            UpperCamelCase = nn.LayerNorm(lowerCamelCase_ )
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
        UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        # optional projection for encoder hidden states (text conditioning)
        if encoder_hid_proj_type is None:
            UpperCamelCase = None
        elif encoder_hid_proj_type == "linear":
            UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
        # learned positional embedding over the full token sequence
        UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase_ ) )
        # optional learned "prd" token appended to the sequence
        if added_emb_type == "prd":
            UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase_ ) )
        elif added_emb_type is None:
            UpperCamelCase = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
        UpperCamelCase = nn.ModuleList(
            [
                BasicTransformerBlock(
                    lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dropout=lowerCamelCase_ , activation_fn="""gelu""" , attention_bias=lowerCamelCase_ , )
                for d in range(lowerCamelCase_ )
            ] )
        if norm_in_type == "layer":
            UpperCamelCase = nn.LayerNorm(lowerCamelCase_ )
        elif norm_in_type is None:
            UpperCamelCase = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
        UpperCamelCase = nn.LayerNorm(lowerCamelCase_ )
        UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        # upper-triangular -10000 mask => tokens cannot attend to the future
        UpperCamelCase = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
        causal_attention_mask.triu_(1 )
        UpperCamelCase = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , lowerCamelCase_ , persistent=lowerCamelCase_ )
        # statistics used to (un)normalize CLIP latents
        UpperCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
        UpperCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def lowerCamelCase_ ( self : Tuple ):
        """Return all attention processors of this model, indexed by their
        module path (recursively collected from every child that exposes
        ``set_processor``)."""
        UpperCamelCase = {}
        def fn_recursive_add_processors(lowerCamelCase_ : str , lowerCamelCase_ : torch.nn.Module , lowerCamelCase_ : Dict[str, AttentionProcessor] ):
            if hasattr(lowerCamelCase_ , """set_processor""" ):
                UpperCamelCase = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase_ , lowerCamelCase_ )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        return processors
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        """Set the attention processor(s); accepts a single processor applied
        everywhere or a dict keyed by module path (must cover every layer)."""
        UpperCamelCase = len(self.attn_processors.keys() )
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase_ )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(lowerCamelCase_ : str , lowerCamelCase_ : torch.nn.Module , lowerCamelCase_ : Union[str, Any] ):
            if hasattr(lowerCamelCase_ , """set_processor""" ):
                if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                    module.set_processor(lowerCamelCase_ )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase_ , lowerCamelCase_ )
        for name, module in self.named_children():
            fn_recursive_attn_processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Reset every attention module to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor() )
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[torch.Tensor, float, int] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[torch.BoolTensor] = None , lowerCamelCase_ : bool = True , ):
        """Forward pass: embed the timestep, project the inputs, concatenate
        the token sequence (text states, projected embedding, time embedding,
        hidden states, optional prd token), run the transformer blocks under
        the causal mask, and project the chosen token back to a CLIP-sized
        embedding.  Returns a tuple when ``return_dict`` is falsy."""
        UpperCamelCase = hidden_states.shape[0]
        UpperCamelCase = timestep
        if not torch.is_tensor(lowerCamelCase_ ):
            UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
            UpperCamelCase = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        UpperCamelCase = timesteps * torch.ones(lowerCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
        UpperCamelCase = self.time_proj(lowerCamelCase_ )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
        UpperCamelCase = self.time_embedding(lowerCamelCase_ )
        if self.embedding_proj_norm is not None:
            UpperCamelCase = self.embedding_proj_norm(lowerCamelCase_ )
        UpperCamelCase = self.embedding_proj(lowerCamelCase_ )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            UpperCamelCase = self.encoder_hidden_states_proj(lowerCamelCase_ )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
        UpperCamelCase = self.proj_in(lowerCamelCase_ )
        UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
        UpperCamelCase = []
        UpperCamelCase = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(lowerCamelCase_ )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # promote 2-D inputs to (batch, 1, dim) so they concatenate as tokens
        if len(proj_embeddings.shape ) == 2:
            UpperCamelCase = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            UpperCamelCase = hidden_states[:, None, :]
        UpperCamelCase = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase_ , -1 , -1 )
            additional_embeds.append(lowerCamelCase_ )
        UpperCamelCase = torch.cat(
            lowerCamelCase_ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            UpperCamelCase = F.pad(
                lowerCamelCase_ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        UpperCamelCase = hidden_states + positional_embeddings
        if attention_mask is not None:
            # convert the boolean mask to additive -10000 bias and merge it
            # with the causal mask, one copy per attention head
            UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
            UpperCamelCase = F.pad(lowerCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
            UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            UpperCamelCase = self.norm_in(lowerCamelCase_ )
        for block in self.transformer_blocks:
            UpperCamelCase = block(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
        UpperCamelCase = self.norm_out(lowerCamelCase_ )
        # with a prd token the prediction is read from the last position,
        # otherwise from the tokens after the additional embeddings
        if self.prd_embedding is not None:
            UpperCamelCase = hidden_states[:, -1]
        else:
            UpperCamelCase = hidden_states[:, additional_embeddings_len:]
        UpperCamelCase = self.proj_to_clip_embeddings(lowerCamelCase_ )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ):
        """Undo the CLIP-latent normalization: scale by the stored std and
        shift by the stored mean."""
        UpperCamelCase = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # NOTE(review): in the obfuscated original every script variable was
    # assigned to the same mangled name while the usage sites kept the real
    # names (`classifier`, `train_datagen`, ...), and `Conv2D`/`MaxPooling2D`
    # were mangled to `ConvaD`/`MaxPoolingaD`.  The intended names are
    # restored from those usage sites and the Keras API.

    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # fit_generator is deprecated in modern TF (Model.fit accepts generators),
    # kept here to preserve the original behaviour
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # NOTE(review): `result[0][0]` is a sigmoid probability, so comparing it
    # for exact equality with 0 or 1 rarely triggers; a 0.5 threshold is the
    # usual intent -- confirm before changing behaviour.
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# module-level logger; the variable name was mangled by the same pass that
# renamed the other globals in this script
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase(checkpoint_url):
    """Derive a ``DPTConfig`` (and the expected output shape) from a DPT
    checkpoint URL.

    The obfuscated original named the parameter ``UpperCamelCase_`` while the
    body referenced ``checkpoint_url`` (a NameError), and the ``config.``
    attribute assignments had been collapsed to a throwaway local; both are
    restored here.
    """
    config = DPTConfig()
    # default shape for non-"large", non-"ade" checkpoints; the original left
    # `expected_shape` unbound on that path (UnboundLocalError at return)
    expected_shape = (1, 384, 384)

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        # semantic-segmentation head on ADE20k (150 classes)
        # NOTE(review): attribute name restored from DPTConfig -- confirm.
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def lowercase(state_dict) -> None:
    """Remove the (unused) pretraining classification head from a raw DPT
    ``state_dict``, in place.

    The obfuscated original popped undefined names instead of each key
    ``k``; it now pops every ignore key, tolerating absent keys.
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        # default of None => no KeyError when a checkpoint lacks the head
        state_dict.pop(k, None)
def lowercase(name: str) -> str:
    """Translate one timm/DPT state-dict key into the HuggingFace DPT naming
    scheme.

    The obfuscated original named the parameter ``UpperCamelCase_`` while the
    body referenced ``name`` (a NameError on every call); the parameter name
    is restored.  The replacement order below is significant (e.g. the
    ``pretrained.model`` checks must run before the bare ``pretrained`` one).
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks (stage 3 has no resize module, hence no "3.4" case)
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def lowercase(state_dict, config) -> None:
    """Split each layer's fused ``qkv`` projection into separate query, key
    and value entries of ``state_dict``, in place.

    The obfuscated original repeated one parameter name (a SyntaxError) and
    dropped the dict writes; the target keys are restored per the standard
    HuggingFace DPT layout -- confirm against the upstream conversion script.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # query / key / value occupy consecutive row blocks of size hidden_size
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def lowercase():
    """Download and return the standard COCO cats test image (PIL).

    The obfuscated original passed an undefined name to ``requests.get``;
    the URL variable and ``stream=True`` are restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True exposes the raw response so PIL can decode it lazily
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint at ``checkpoint_url`` to the
    HuggingFace format, verify a forward pass, save it to
    ``pytorch_dump_folder_path`` and optionally push it to the hub.

    The obfuscated original repeated one parameter name four times (a
    SyntaxError); the names are restored from the body's references.
    NOTE(review): the helpers called below (get_dpt_config, rename_key, ...)
    are defined above in this file under mangled names -- reconcile the
    function names before running this script.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned the parser and the
    # parsed args to the same mangled global while the code below referenced
    # `parser` and `args`; the intended names are restored.  The converter
    # called at the bottom is defined above under a mangled name -- reconcile
    # before running.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when iterating a linked list that contains a cycle.

    The obfuscated original lost both the class name and its base, yet the
    list node below raises and catches ``ContainsLoopError`` -- restoring the
    name (and deriving from ``Exception``) makes those references resolve.
    """

    pass
class SCREAMING_SNAKE_CASE_ :
    """Node of a singly linked list with cycle detection.

    Fixes from the obfuscated original: ``__init__`` never set
    ``self.data``/``self.next_node`` (and referenced an undefined name), and
    ``has_loop`` depended on an exception class whose name had been mangled
    away (so it raised NameError on cyclic lists).  ``has_loop`` now uses
    Floyd's tortoise-and-hare algorithm and needs no exception at all.
    """

    def __init__(self, data):
        """Store the payload; a fresh node has no successor."""
        self.data = data
        self.next_node = None

    def __iter__(self):
        """Yield the payloads from this node onwards.

        Raises ContainsLoopError when a cycle is detected, so iteration
        always terminates.
        """
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self):
        """True if the list starting at this node contains a cycle
        (Floyd's algorithm: the fast pointer catches the slow one iff a
        cycle exists)."""
        slow = fast = self
        while fast is not None and fast.next_node is not None:
            slow = slow.next_node
            fast = fast.next_node.next_node
            if slow is fast:
                return True
        return False
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned every node to the same
    # mangled global and referenced an undefined `Node`; the alias below and
    # the restored chain reproduce the intended demo (one node list kept by
    # `root_node.next_node` at line 6 of the original).
    Node = SCREAMING_SNAKE_CASE_

    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
# Precomputed fifth powers of the decimal digits, keyed by the digit's
# character.  The obfuscated original mangled the constant's name while the
# function below referenced DIGITS_FIFTH_POWER (a NameError); restored.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def lowercase(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def lowercase() -> int:
    """Project Euler 30 (fifth powers): sum of all numbers in
    [1000, 1000000) that equal the sum of the fifth powers of their digits.

    NOTE(review): the original called a helper named
    ``digits_fifth_powers_sum`` that does not exist under that name in this
    file (the def above was mangled), so the digit-power sum is inlined here
    to keep this function self-contained.
    """
    powers = {str(d): d**5 for d in range(10)}
    return sum(
        number
        for number in range(1000, 1000000)
        if number == sum(powers[c] for c in str(number))
    )


if __name__ == "__main__":
    print(lowercase())
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    # NOTE(review): SageMaker model-parallel integration test.  Obfuscation
    # mangled every method to the same name `lowerCamelCase_`, so at class
    # creation only the LAST definition survives (the earlier setUp/estimator
    # factory/metrics-export methods are shadowed) -- restore the original
    # method names before relying on this class.
    # Presumably the original setUp: copies the example training script into
    # the test workspace for the pytorch framework.
    def lowerCamelCase_ ( self : Tuple ):
        """Copy the GLUE example script into the SageMaker test path and
        check the `sm_env` fixture attached the environment."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , )
        assert hasattr(self , """env""" )
    # Presumably the original estimator factory.
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ):
        """Build a HuggingFace SageMaker estimator configured for SMP
        (SageMaker model parallelism) with 4 partitions + DDP."""
        # distribution configuration: 8 processes per host via MPI
        UpperCamelCase = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        # model-parallel options: 4-way interleaved pipeline, speed-optimized
        UpperCamelCase = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , )
    # Presumably the original metrics exporter.
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ):
        """Export the training job's CloudWatch metrics to a CSV file."""
        TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(1,)] )
    # Presumably the original test method (single-instance run).
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
        """Run the training job, then assert the runtime/accuracy/loss KPIs
        against the parameterized thresholds and dump them to JSON."""
        UpperCamelCase = self.create_estimator(lowerCamelCase_ )
        # run training
        estimator.fit()
        # result dataframe
        UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCamelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
_SCREAMING_SNAKE_CASE = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
_SCREAMING_SNAKE_CASE = {
"""jukebox""": 5_1_2,
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_LYRIC_TOKENS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : List[Any]=["v3", "v2", "v2"] , lowerCamelCase_ : Tuple=512 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : str="<|endoftext|>" , **lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
super().__init__(
unk_token=lowerCamelCase_ , n_genres=lowerCamelCase_ , version=lowerCamelCase_ , max_n_lyric_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = version
UpperCamelCase = max_n_lyric_tokens
UpperCamelCase = n_genres
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
UpperCamelCase = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
UpperCamelCase = oov.replace(R"""\-'""" , R"""\-+'""" )
UpperCamelCase = regex.compile(lowerCamelCase_ )
UpperCamelCase = {v: k for k, v in self.artists_encoder.items()}
UpperCamelCase = {v: k for k, v in self.genres_encoder.items()}
UpperCamelCase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = [self.artists_encoder.get(lowerCamelCase_ , 0 ) for artist in list_artists]
for genres in range(len(lowerCamelCase_ ) ):
UpperCamelCase = [self.genres_encoder.get(lowerCamelCase_ , 0 ) for genre in list_genres[genres]]
UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
UpperCamelCase = [[self.lyrics_encoder.get(lowerCamelCase_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
return list(lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , **lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_for_tokenization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = self._tokenize(lowerCamelCase_ )
return artist, genre, lyrics
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : bool = False ):
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
UpperCamelCase = artists[idx].lower()
UpperCamelCase = [genres[idx].lower()]
else:
UpperCamelCase = self._normalize(artists[idx] ) + """.v2"""
UpperCamelCase = [
self._normalize(lowerCamelCase_ ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
UpperCamelCase = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
UpperCamelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
UpperCamelCase = {vocab[index]: index + 1 for index in range(len(lowerCamelCase_ ) )}
UpperCamelCase = 0
UpperCamelCase = len(lowerCamelCase_ ) + 1
UpperCamelCase = self.vocab
UpperCamelCase = {v: k for k, v in self.vocab.items()}
UpperCamelCase = """"""
else:
UpperCamelCase = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
UpperCamelCase = self._run_strip_accents(lowerCamelCase_ )
UpperCamelCase = lyrics.replace("""\\""" , """\n""" )
UpperCamelCase = self.out_of_vocab.sub("""""" , lowerCamelCase_ ), [], []
return artists, genres, lyrics
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = unicodedata.normalize("""NFD""" , lowerCamelCase_ )
UpperCamelCase = []
for char in text:
UpperCamelCase = unicodedata.category(lowerCamelCase_ )
if cat == "Mn":
continue
output.append(lowerCamelCase_ )
return "".join(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = (
[chr(lowerCamelCase_ ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(lowerCamelCase_ ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(lowerCamelCase_ ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
UpperCamelCase = frozenset(lowerCamelCase_ )
UpperCamelCase = re.compile(R"""_+""" )
UpperCamelCase = """""".join([c if c in accepted else """_""" for c in text.lower()] )
UpperCamelCase = pattern.sub("""_""" , lowerCamelCase_ ).strip("""_""" )
return text
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] ):
"""simple docstring"""
return " ".join(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase = TensorType(lowerCamelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
UpperCamelCase = tf.constant
UpperCamelCase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
UpperCamelCase = torch.tensor
UpperCamelCase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
UpperCamelCase = jnp.array
UpperCamelCase = _is_jax
else:
UpperCamelCase = np.asarray
UpperCamelCase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
UpperCamelCase = [inputs]
if not is_tensor(lowerCamelCase_ ):
UpperCamelCase = as_tensor(lowerCamelCase_ )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]="" , lowerCamelCase_ : Dict="pt" ):
"""simple docstring"""
UpperCamelCase = [0, 0, 0]
UpperCamelCase = [artist] * len(self.version )
UpperCamelCase = [genres] * len(self.version )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.tokenize(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self._convert_token_to_id(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = [-INFINITY] * len(full_tokens[-1] )
UpperCamelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowerCamelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=lowerCamelCase_ ) )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=lowerCamelCase_ ) )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowerCamelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = self.artists_decoder.get(lowerCamelCase_ )
UpperCamelCase = [self.genres_decoder.get(lowerCamelCase_ ) for genre in genres_index]
UpperCamelCase = [self.lyrics_decoder.get(lowerCamelCase_ ) for character in lyric_index]
return artist, genres, lyrics
| 343 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 343 | 1 |
def lowercase(perimeter: int = 1000) -> int:
    """Project Euler 9: product a*b*c of the Pythagorean triple with a + b + c == perimeter.

    Fixes the original, which referenced an undefined name in the inner
    ``range`` and materialized the full list of candidates only to take
    element ``[0]``; ``next`` on a generator stops at the first hit instead.
    Generalized: ``perimeter`` is now a parameter whose default (1000)
    preserves the original behaviour.

    Args:
        perimeter: the required sum a + b + c (default 1000).

    Returns:
        The product a * b * c of the first triple found.

    Raises:
        StopIteration: if no Pythagorean triple sums to ``perimeter``.
    """
    # b starts at a so each unordered pair is considered once (a <= b < c).
    return next(
        a * b * (perimeter - a - b)
        for a in range(1, perimeter - 1)
        for b in range(a, perimeter - 1)
        if a * a + b * b == (perimeter - a - b) ** 2
    )
# Script entry point: print the Project Euler answer when run directly.
# NOTE(review): ``solution`` is not defined in this file -- the function
# above is named ``lowercase`` (obfuscation artifact); confirm the intended
# name before running this module as a script.
if __name__ == "__main__":
print(F'''{solution() = }''')
| 343 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = ShapEPipeline
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase = False
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_prior
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = self.dummy_tokenizer
UpperCamelCase = self.dummy_renderer
UpperCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
UpperCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase = output.images[0]
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch_device == """cpu"""
UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase = batch_size * [inputs[key]]
UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
# NOTE(review): both methods below carry the same obfuscated name
# ``lowerCamelCase_`` (only the last survives class creation), and results
# are bound to ``UpperCamelCase`` while later lines read descriptive names
# (``pipe``, ``images``, ``expected_image``); the originals were presumably
# ``tearDown`` and ``test_shap_e`` -- confirm against upstream diffusers.
def lowerCamelCase_ ( self : Tuple ):
"""Release GPU memory between slow tests (collect + empty CUDA cache)."""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
"""End-to-end ShapE run on GPU: generate 20 64x64 frames for "a shark" and
compare the mean pixel values against a stored reference array."""
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = pipe(
"""a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343 | 1 |
def lowercase(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body.

    Fixes the original signature, which declared the same obfuscated
    parameter name twice (a SyntaxError) while the body read ``mass``;
    the descriptive names are restored from the body's own usage.

    Args:
        mass: mass of the body; must be non-negative.
        velocity: velocity of the body; sign is irrelevant (v is squared).

    Returns:
        The kinetic energy as a float.

    Raises:
        ValueError: if ``mass`` is negative.
    """
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    # abs() twice is equivalent to v*v but mirrors the original expression.
    return 0.5 * mass * abs(velocity) * abs(velocity)
# Script entry point: run the module's doctests verbosely when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 343 | from __future__ import annotations
def lowercase(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge two adjacent sorted runs of ``input_list`` in place.

    Merges ``input_list[low:mid]`` and ``input_list[mid:high + 1]`` (each
    assumed already sorted) back into ``input_list[low:high + 1]``.

    Fixes the original, whose locals were all collapsed to one obfuscated
    name: the merge loop referenced undefined ``left``/``right`` and the
    merged result was bound to a throwaway local instead of being spliced
    back into ``input_list``, so the function had no effect.

    Args:
        input_list: the list containing both runs; mutated in place.
        low: index of the first element of the left run.
        mid: one past the last index of the left run / first of the right.
        high: index of the last element of the right run.

    Returns:
        The same ``input_list`` object, now merged over [low, high].
    """
    merged = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    # Take the smaller head each time; `<=` keeps the merge stable.
    while left and right:
        merged.append((left if left[0] <= right[0] else right).pop(0))
    # Whichever run remains is already sorted; splice everything back.
    input_list[low : high + 1] = merged + left + right
    return input_list
def lowercase(input_list: list) -> list:
    """Sort ``input_list`` with bottom-up (iterative) merge sort and return it.

    Fixes the original, which called an undefined ``merge`` helper (its name
    was lost to obfuscation) and whose loop variables were collapsed to one
    placeholder name. The merge step is now a self-contained nested helper.

    Args:
        input_list: sequence of comparable items.

    Returns:
        A new sorted list; inputs of length <= 1 are returned unchanged
        (same object), matching the original behaviour.
    """

    def _merge(items: list, low: int, mid: int, high: int) -> list:
        # Merge the sorted runs items[low:mid] and items[mid:high+1] in place.
        merged = []
        left, right = items[low:mid], items[mid : high + 1]
        while left and right:
            merged.append((left if left[0] <= right[0] else right).pop(0))
        items[low : high + 1] = merged + left + right
        return items

    if len(input_list) <= 1:
        return input_list
    items = list(input_list)  # work on a copy; do not mutate the caller's list
    # Merge adjacent runs of width p // 2, doubling p each pass.
    p = 2
    while p <= len(items):
        for i in range(0, len(items), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            items = _merge(items, low, mid, high)
        # Final pass: merge the (possibly unequal) last two sorted parts.
        if p * 2 >= len(items):
            mid = i
            items = _merge(items, 0, mid, len(items) - 1)
            break
        p *= 2
    return items
# Script entry point: read comma-separated integers from stdin and print them sorted.
# NOTE(review): the input is bound to the obfuscated ``_SCREAMING_SNAKE_CASE``
# while later lines read ``user_input`` / ``unsorted`` -- presumably the
# original bindings were those names; confirm before running interactively.
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
_SCREAMING_SNAKE_CASE = []
else:
_SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 343 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Smoke tests for diffusers' ``get_activation`` factory.

    Each test asserts the returned ``nn.Module`` subclass and probes the
    activation at a few points: a large negative input saturates to exactly 0
    (sigmoid/tanh underflow in float32), -1 maps to a non-zero value, 0 maps
    to 0, and a large positive input passes through unchanged.

    Fixes to the original: the activation was bound to an obfuscated
    placeholder while the assertions read ``act`` (a NameError at runtime);
    the dtype was mangled to the nonexistent ``torch.floataa`` (restored to
    ``torch.float32`` -- presumably correct, confirm against upstream); and
    all four methods shared one obfuscated name, so only the last would have
    survived class creation -- they are restored to distinct ``test_*`` names
    that unittest can discover.
    """

    def test_swish(self):
        """'swish' resolves to nn.SiLU with the expected pointwise values."""
        act = get_activation("""swish""")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        """'silu' resolves to nn.SiLU with the expected pointwise values."""
        act = get_activation("""silu""")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        """'mish' resolves to nn.Mish; -200 is used because mish saturates slower."""
        act = get_activation("""mish""")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        """'gelu' resolves to nn.GELU with the expected pointwise values."""
        act = get_activation("""gelu""")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 343 | import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = embeddings_size
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = len(lowerCamelCase_ )
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = num_groups
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = BitModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = BitForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowerCAmelCase = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = BitModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=lowerCamelCase_ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def lowerCamelCase_ ( self : int ):
        """Check hidden-state outputs: one per stage (+1), with stage-0 maps at image_size // 4."""
        # NOTE(review): the inner helper repeats the parameter name
        # ``lowerCamelCase_`` three times, which is a SyntaxError in Python, and the
        # body reads several names (``model``, ``outputs``, ``config``,
        # ``hidden_states``, ``layers_type``, ``inputs_dict``) that are never
        # assigned — artifacts of automated renaming; verify against upstream.
        def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ):
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCamelCase = layer_type
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Intentionally skipped: feed-forward chunking does not apply to Bit."""
        pass
    def lowerCamelCase_ ( self : str ):
        """Run the shared image-classification head check on prepared config/inputs."""
        # NOTE(review): inputs bound to ``UpperCamelCase`` but expanded under
        # ``lowerCamelCase_`` — renaming artifact; confirm against upstream.
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
    @slow
    def lowerCamelCase_ ( self : int ):
        """Smoke-test: load the first published Bit checkpoint from the hub."""
        # NOTE(review): ``lowerCamelCase_`` is read but never assigned here
        # (upstream passes ``model_name`` and checks the loaded ``model``) —
        # renaming artifact; verify before running.
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> Any:
    """Return the COCO cats fixture image used by the vision integration tests.

    Bug fix: the original assigned the opened image to one (obfuscated) name but
    returned the undefined name ``image``, raising ``NameError`` when called.
    The opened image is now bound to and returned under the same name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: run a pretrained Bit image classifier on a fixture image."""

    @cached_property
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Image processor for the first published Bit checkpoint (None when vision deps are absent)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """End-to-end forward pass; checks the logit shape and first three logit values."""
        # NOTE(review): this method shadows the cached property above (both carry
        # the obfuscated name ``lowerCamelCase_``), and several locals are assigned
        # to ``UpperCamelCase`` but read under other names (``model``, ``outputs``,
        # ``image_processor``, ``prepare_img``) — automated-renaming artifacts;
        # verify against the upstream integration test before running.
        UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(**lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Backbone-specific test suite for Bit (mixes the shared backbone tester into unittest)."""

    # NOTE(review): automated renaming collapsed several distinct class attributes
    # into the single name ``__lowerCAmelCase`` (only the last assignment survives);
    # upstream these are ``all_model_classes`` / ``config_class`` / ``has_attentions``.
    __lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
    __lowerCAmelCase = BitConfig
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Create the shared Bit model-tester fixture (upstream: ``setUp``)."""
        UpperCamelCase = BitModelTester(self )
| 343 | 1 |
def lowercase( UpperCamelCase_ ) -> bool:
    """Return True if the argument is a perfect number.

    A perfect number is a positive integer equal to the sum of its proper
    positive divisors, e.g. ``6 == 1 + 2 + 3`` and ``28 == 1 + 2 + 4 + 7 + 14``.
    """
    # Bug fix: the original returned True for 0 because the empty divisor sum
    # (0) compared equal to the input; perfect numbers are positive by definition,
    # so reject non-positive inputs up front.
    if UpperCamelCase_ <= 0:
        return False
    return sum(i for i in range(1 , UpperCamelCase_ // 2 + 1 ) if UpperCamelCase_ % i == 0 ) == UpperCamelCase_
if __name__ == "__main__":
    # Interactive demo: read an integer and report whether it is a perfect number.
    print("""Program to check whether a number is a Perfect number or not...""")
    _SCREAMING_SNAKE_CASE = int(input("""Enter number: """).strip())
    # Bug fix: the original referenced the undefined names ``perfect`` and
    # ``number`` (NameError at runtime) — the checker defined above is named
    # ``lowercase`` and the parsed input is bound to ``_SCREAMING_SNAKE_CASE``.
    print(F'''{_SCREAMING_SNAKE_CASE} is {"" if lowercase(_SCREAMING_SNAKE_CASE) else "not "}a Perfect Number.''')
| 343 | from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ :
    """Fixture builder for the TF ResNet tests: holds hyper-parameters and builds configs/inputs.

    NOTE(review): automated renaming broke this class — ``__init__`` repeats the
    parameter name ``lowerCamelCase_`` (a SyntaxError), attribute stores such as
    ``self.parent = parent`` were collapsed to local ``UpperCamelCase`` assignments,
    and several names (``parent``, ``batch_size``, ``config``, ``result`` …) are
    read without being bound.  Restore the upstream ``TFResNetModelTester`` names
    before executing.
    """

    def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ):
        """Store the tester hyper-parameters (batch size, image size, stage widths, depths, …)."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = num_channels
        UpperCamelCase = embeddings_size
        UpperCamelCase = hidden_sizes
        UpperCamelCase = depths
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_act
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        UpperCamelCase = len(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        """Build a random pixel-value batch, optional labels, and a fresh config."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Optional[int] ):
        """Construct a ResNetConfig from the stored hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
        """Forward a base TFResNetModel and check the last hidden-state shape."""
        UpperCamelCase = TFResNetModel(config=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
        """Forward the classification head and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self : int ):
        """Split prepared fixtures into (config, inputs_dict) for the common test mixin."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Main TF ResNet test suite (common model tests + pipeline tests).

    NOTE(review): all obfuscated class attributes below rebind the one name
    ``__lowerCAmelCase`` (only the last survives); upstream they are
    ``all_model_classes`` / ``pipeline_model_mapping`` / ``test_pruning`` /
    ``test_resize_embeddings`` / ``test_head_masking`` / ``has_attentions`` /
    ``test_onnx``.  Method bodies also read names that are never assigned
    (``lowerCamelCase_``, ``model``, ``outputs`` …) — renaming artifacts.
    """

    __lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    __lowerCAmelCase = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Create the model-tester and config-tester fixtures (upstream: ``setUp``)."""
        UpperCamelCase = TFResNetModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] ):
        """Exercise the full set of shared config serialization/initialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Placeholder for the common-properties hook; ResNet has none to add."""
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def lowerCamelCase_ ( self : Dict ):
        """Intentionally skipped: ResNet consumes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Intentionally skipped: ResNet has no embedding layers."""
        pass

    def lowerCamelCase_ ( self : Optional[int] ):
        """Check that every model class's ``call`` signature starts with ``pixel_values``."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] ):
        """Run the shared model-creation check on prepared config/inputs."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Check hidden-state outputs: one per stage (+1), stage-0 maps at image_size // 4."""
        # NOTE(review): like its PyTorch counterpart, this helper's body reads many
        # names that automated renaming unbound — verify against upstream.
        def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ):
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCamelCase = layer_type
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] ):
        """Run the shared image-classification head check on prepared config/inputs."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : Any ):
        """Smoke-test: load the first published TF ResNet checkpoint from the hub."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> Any:
    """Return the COCO cats fixture image used by the TF ResNet integration test.

    Bug fix: the original assigned the opened image to one (obfuscated) name but
    returned the undefined name ``image``, raising ``NameError`` when called.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: run a pretrained TF ResNet classifier on a fixture image."""

    @cached_property
    def lowerCamelCase_ ( self : Dict ):
        """Image processor for the first published TF ResNet checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCamelCase_ ( self : Dict ):
        """End-to-end forward pass; checks the logit shape and first three logit values."""
        # NOTE(review): this method shadows the cached property above (same
        # obfuscated name), and locals assigned to ``UpperCamelCase`` are read
        # under other names (``model``, ``outputs``, ``image_processor``,
        # ``prepare_img``) — renaming artifacts; verify against upstream.
        UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
        # forward pass
        UpperCamelCase = model(**lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
| 343 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Audio diffusion pipeline: denoises a mel-spectrogram image (optionally through a VQ-VAE
    latent space) and converts it back to audio.

    NOTE(review): automated renaming broke this class — ``__init__`` and
    ``__call__`` repeat the parameter name ``lowerCamelCase_`` (a SyntaxError),
    and bodies assign to ``UpperCamelCase`` while reading other, unbound names
    (``steps``, ``noise``, ``images``, ``mask`` …).  Comments below describe the
    apparent upstream intent; verify against the original pipeline before use.
    """

    # components that may legitimately be None when loading the pipeline
    __lowerCAmelCase = ["""vqvae"""]

    def __init__( self : List[Any] , lowerCamelCase_ : AutoencoderKL , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : Mel , lowerCamelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
        """Register the UNet, scheduler, mel converter and (optional) VQ-VAE submodules."""
        super().__init__()
        self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , mel=lowerCamelCase_ , vqvae=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        # NOTE(review): the isinstance target ``lowerCamelCase_`` is unbound here;
        # upstream this tests ``isinstance(self.scheduler, DDIMScheduler)``.
        return 50 if isinstance(self.scheduler , lowerCamelCase_ ) else 1000

    @torch.no_grad()
    def __call__( self : str , lowerCamelCase_ : int = 1 , lowerCamelCase_ : str = None , lowerCamelCase_ : np.ndarray = None , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = None , lowerCamelCase_ : torch.Generator = None , lowerCamelCase_ : float = 0 , lowerCamelCase_ : float = 0 , lowerCamelCase_ : torch.Generator = None , lowerCamelCase_ : float = 0 , lowerCamelCase_ : torch.Tensor = None , lowerCamelCase_ : torch.Tensor = None , lowerCamelCase_ : Optional[Any]=True , ):
        """Run the diffusion loop: seed with noise (optionally conditioned on an input
        audio slice), denoise step by step, decode through the VQ-VAE if present,
        and return generated images plus reconstructed audio."""
        UpperCamelCase = steps or self.get_default_steps()
        self.scheduler.set_timesteps(lowerCamelCase_ )
        UpperCamelCase = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            UpperCamelCase = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=lowerCamelCase_ , device=self.device , )
        UpperCamelCase = noise
        UpperCamelCase = None
        if audio_file is not None or raw_audio is not None:
            # Convert the conditioning audio slice to a mel image in [-1, 1].
            self.mel.load_audio(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = self.mel.audio_slice_to_image(lowerCamelCase_ )
            UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            UpperCamelCase = (input_image / 255) * 2 - 1
            UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                UpperCamelCase = self.vqvae.encode(torch.unsqueeze(lowerCamelCase_ , 0 ) ).latent_dist.sample(
                    generator=lowerCamelCase_ )[0]
                UpperCamelCase = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                UpperCamelCase = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , self.scheduler.timesteps[start_step - 1] )
            # Pixel columns per second of audio, used to convert mask times to widths.
            UpperCamelCase = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            UpperCamelCase = int(mask_start_secs * pixels_per_second )
            UpperCamelCase = int(mask_end_secs * pixels_per_second )
            UpperCamelCase = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            # Conditioned UNet takes an (empty) encoder hidden state argument.
            if isinstance(self.unet , lowerCamelCase_ ):
                UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )["""sample"""]
            else:
                UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ )["""sample"""]
            if isinstance(self.scheduler , lowerCamelCase_ ):
                UpperCamelCase = self.scheduler.step(
                    model_output=lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , )["""prev_sample"""]
            else:
                UpperCamelCase = self.scheduler.step(
                    model_output=lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , generator=lowerCamelCase_ , )["""prev_sample"""]
            if mask is not None:
                # Re-impose the masked (known) regions of the conditioning image.
                if mask_start > 0:
                    UpperCamelCase = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    UpperCamelCase = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
            UpperCamelCase = self.vqvae.decode(lowerCamelCase_ )["""sample"""]
        UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        UpperCamelCase = (images * 255).round().astype("""uint8""" )
        # NOTE(review): in the else-branch below, ``Image.fromarray(lowerCamelCase_ …)``
        # looks like a renaming artifact — upstream converts each array ``_``.
        UpperCamelCase = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(lowerCamelCase_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
        UpperCamelCase = [self.mel.image_to_audio(lowerCamelCase_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCamelCase_ ) )

    @torch.no_grad()
    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Image.Image] , lowerCamelCase_ : int = 50 ):
        """Invert images back to noise by running the DDIM update in reverse (requires DDIMScheduler)."""
        assert isinstance(self.scheduler , lowerCamelCase_ )
        self.scheduler.set_timesteps(lowerCamelCase_ )
        UpperCamelCase = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        UpperCamelCase = (sample / 255) * 2 - 1
        UpperCamelCase = torch.Tensor(lowerCamelCase_ ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            UpperCamelCase = self.scheduler.alphas_cumprod[t]
            UpperCamelCase = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            UpperCamelCase = 1 - alpha_prod_t
            UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ )["""sample"""]
            UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def lowerCamelCase_ ( lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : float ):
        """Spherical linear interpolation (slerp) between two flattened tensors."""
        # NOTE(review): the repeated parameter name is a SyntaxError; upstream this
        # is ``slerp(x0, x1, alpha)``.
        UpperCamelCase = acos(torch.dot(torch.flatten(lowerCamelCase_ ) , torch.flatten(lowerCamelCase_ ) ) / torch.norm(lowerCamelCase_ ) / torch.norm(lowerCamelCase_ ) )
        return sin((1 - alpha) * theta ) * xa / sin(lowerCamelCase_ ) + sin(alpha * theta ) * xa / sin(lowerCamelCase_ )
| 343 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list is created as ``_SCREAMING_SNAKE_CASE`` but appended to as
# ``rename_keys`` below — an automated-renaming artifact; upstream both are the
# single name ``rename_keys``.  Executed as-is this raises NameError.
_SCREAMING_SNAKE_CASE = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append(
        (
            F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
            F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
        )
    )
    rename_keys.append(
        (
            F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
            F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
        )
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
        ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
        ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
        ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
        ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
        ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
        ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
        ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
        ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
        ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
        ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
    ]
)
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
    """Rename one checkpoint key in place: pop the old key and re-insert its value under the new key.

    NOTE(review): automated renaming collapsed the three parameters (upstream
    ``state_dict, old, new``) into one repeated name — a SyntaxError — and the
    body reads the undefined names ``state_dict`` / ``val``.  Restore the
    upstream names before use.
    """
    UpperCamelCase = state_dict.pop(UpperCamelCase_ )
    UpperCamelCase = val
def lowercase( UpperCamelCase_ ) -> Any:
    """Return a copy of a state dict with ``backbone.0.body`` keys moved under
    ``backbone.conv_encoder.model``.

    NOTE(review): the body reads ``state_dict`` and returns ``new_state_dict``,
    neither of which is bound after automated renaming (upstream: the parameter
    is ``state_dict`` and the ``OrderedDict`` is ``new_state_dict``).
    """
    UpperCamelCase = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            UpperCamelCase = value
        else:
            UpperCamelCase = value
    return new_state_dict
def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]:
    """Split each encoder layer's fused in-projection into separate q/k/v weights and biases.

    NOTE(review): the obfuscated parameters are upstream ``state_dict`` and
    ``is_panoptic``; the q/k/v slices below are assigned to ``UpperCamelCase``
    instead of the renamed state-dict entries — verify against the upstream
    ``read_in_q_k_v`` before executing.
    """
    UpperCamelCase = """"""
    if is_panoptic:
        UpperCamelCase = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCamelCase = in_proj_weight[:256, :]
        UpperCamelCase = in_proj_bias[:256]
        UpperCamelCase = in_proj_weight[256:512, :]
        UpperCamelCase = in_proj_bias[256:512]
        UpperCamelCase = in_proj_weight[-256:, :]
        UpperCamelCase = in_proj_bias[-256:]
def lowercase( ) -> Any:
    """Download and return the standard COCO validation image used to verify the conversion.

    Bug fix: the original read the undefined names ``UpperCamelCase_`` (as both the
    URL and the ``stream`` flag) and ``im``, raising ``NameError`` when called.
    The canonical form streams the response and opens the raw body with PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
    """Convert an original Conditional-DETR checkpoint (torch hub) to the HuggingFace format,
    verify outputs match, push to the hub, and save locally.

    NOTE(review): automated renaming left this function non-executable — the two
    parameters (upstream ``model_name`` and ``pytorch_dump_folder_path``) share
    one repeated name (a SyntaxError), every local is assigned to ``UpperCamelCase``
    while being read under its upstream name (``config``, ``model_name``,
    ``state_dict``, ``outputs`` …), and the helpers it calls (``prepare_img``,
    ``rename_key``, ``rename_backbone_keys``, ``read_in_q_k_v``) were themselves
    renamed to ``lowercase`` above.  Restore upstream names before running.
    """
    UpperCamelCase = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCamelCase = """resnet101"""
    if "dc5" in model_name:
        UpperCamelCase = True
    UpperCamelCase = """panoptic""" in model_name
    if is_panoptic:
        UpperCamelCase = 250
    else:
        UpperCamelCase = 91
        UpperCamelCase = """huggingface/label-files"""
        UpperCamelCase = """coco-detection-id2label.json"""
        UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
        UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
        UpperCamelCase = idalabel
        UpperCamelCase = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection"""
    UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ )
    # prepare image
    UpperCamelCase = prepare_img()
    UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" )
    UpperCamelCase = encoding["""pixel_values"""]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval()
    UpperCamelCase = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            UpperCamelCase = """conditional_detr.""" + src
        rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    UpperCamelCase = rename_backbone_keys(UpperCamelCase_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""conditional_detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                continue
            else:
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                UpperCamelCase = state_dict.pop(UpperCamelCase_ )
                UpperCamelCase = val
    # finally, create HuggingFace model and load state dict
    UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ )
    model.load_state_dict(UpperCamelCase_ )
    model.eval()
    model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    UpperCamelCase = conditional_detr(UpperCamelCase_ )
    UpperCamelCase = model(UpperCamelCase_ )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
    model.save_pretrained(UpperCamelCase_ )
    image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    # CLI entry point: parse the model name and dump folder, then run the conversion.
    # NOTE(review): ``parser``, ``args`` and ``convert_conditional_detr_checkpoint``
    # are read but never bound under those names after automated renaming (the
    # parser/args were renamed to ``_SCREAMING_SNAKE_CASE`` and the conversion
    # function to ``lowercase``) — restore upstream names before running.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""conditional_detr_resnet50""",
        type=str,
        help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence) -> None:
    """Print every permutation of *sequence* using backtracking.

    Fix: both functions in this script were renamed to `lowercase` by the
    obfuscation, so the second definition shadowed the first and the helper
    `create_state_space_tree` (called here) was undefined. Names restored to
    match the call sites at module level.
    """
    create_state_space_tree(sequence, [], 0, [False for _ in range(len(sequence))])
def create_state_space_tree(sequence, current_sequence, index, index_used) -> None:
    """Depth-first backtracking: print each complete permutation of *sequence*.

    Fix: the original signature repeated the same obfuscated parameter name
    four times (a SyntaxError); parameter names restored from the body.

    :param sequence: items to permute
    :param current_sequence: partial permutation built so far (mutated in place)
    :param index: number of items already placed
    :param index_used: per-position flags marking items already consumed
    """
    if index == len(sequence):
        # A full permutation has been built — emit it.
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Undo the choice before trying the next candidate (backtrack).
            current_sequence.pop()
            index_used[i] = False
# Demo: the calls below referenced `sequence` / `sequence_a`, but the
# obfuscated assignments bound `_SCREAMING_SNAKE_CASE` instead (NameError).
# Restore the names the calls actually use.
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 343 | from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    """Builds tiny ViTMAE configs/inputs and runs shape checks for the TF tests.

    Fix: the obfuscation repeated one parameter name throughout ``__init__``
    (a SyntaxError), collapsed every ``self.attr = ...`` assignment to a bare
    local, and renamed every method to ``lowerCamelCase_`` so later defs
    shadowed earlier ones. Names are restored from the call sites in the test
    class below (``TFViTMAEModelTester(self)``, ``prepare_config_and_inputs``,
    ``prepare_config_and_inputs_for_common``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for API parity; not stored by this tester
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small ViTMAEConfig mirroring the tester's hyper-parameters."""
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's output shape."""
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Check the pretraining head's logits shape, incl. greyscale input."""
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """TF ViTMAE model tests.

    Fix: all methods were renamed to ``lowerCamelCase_`` by the obfuscation
    (each def shadowed the previous one, so only the last survived),
    ``check_pt_tf_models`` repeated one parameter name three times
    (a SyntaxError), and every local was collapsed to ``UpperCamelCase``.
    Names restored from the surviving structure and call sites.
    """

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            # Convert every tensor/array-like value to a plain numpy array.
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np_dict = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np_dict, noise=noise)
            output_for_kw_input = model(**inputs_np_dict, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    Fix: the integration test below calls ``prepare_img()``, but the
    obfuscation renamed this function to ``lowercase`` (NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the released facebook/vit-mae-base weights.

    Fix: both members were renamed to ``lowerCamelCase_`` by the obfuscation;
    the body reads ``self.default_image_processor``, grounding that name.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 343 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Fix: the obfuscation collapsed every variable to `_SCREAMING_SNAKE_CASE`,
    # so later references (train_x, x_train, ...) were unbound. Names restored
    # from the references the script itself makes.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # scale every value into [0, 1] before feeding the LSTM
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # build sliding windows: `look_back` inputs -> `forward_days` targets
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no neighbour of the vertex already uses *color*.

    Fix: the obfuscated signature repeated one parameter name three times
    (a SyntaxError); names restored from the body and the caller in
    ``util_color``. *neighbours* is an adjacency-matrix row (1 marks an edge).
    """
    # Does any neighbour not satisfy the constraints?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertices from *index* on; True on success.

    Fix: the function was renamed to ``lowercase`` while the recursive call
    and the caller use ``util_color``, and the signature repeated one
    parameter name four times (a SyntaxError). Names restored from usage.
    Mutates *colored_vertices* in place.
    """
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring of *graph* using at most *max_colors*
    colors, or an empty list when none exists.

    Fix: duplicate obfuscated parameter names (SyntaxError) and a def name
    (``lowercase``) that shadowed the sibling functions; restored.
    """
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
| 343 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_SCREAMING_SNAKE_CASE = ["""text""", """image""", """audio"""]
def create_inputs(input_types: List[str]):
    """Build one dummy input per entry of *input_types*.

    "text" -> a fixed string, "image" -> a 512x512 fixture image,
    "audio" -> a ones tensor; a nested list recurses. Raises ValueError on
    anything else. (Fix: def name restored from the recursive call below;
    the obfuscated isinstance check restored to the list test the recursion
    implies.)
    """
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    """Map each tool output to its kind string ("text"/"image"/"audio").

    Fix: def name restored from the call inside the mixin's test_call.
    Raises ValueError for an output of an unrecognized type.
    """
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    """Shared checks mixed into per-tool test cases (expects ``self.tool``).

    Fix: all five methods were renamed to ``lowerCamelCase_`` by the
    obfuscation, so only the last definition survived in the class dict;
    restored to distinct descriptive test_* names so pytest discovers each.
    """

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 343 | import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Fix: the obfuscated `_SCREAMING_SNAKE_CASE` bindings left the names the
# tokenizer class references (logger, VOCAB_FILES_NAMES, ...) undefined;
# restored from those references.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return the byte -> printable-unicode-character map used by byte-level BPE.

    Printable/latin bytes map to themselves; the remaining bytes map to
    chr(256 + n) so every byte has a visible, non-whitespace representative.
    (Fix: def name restored from the call in the tokenizer's ``__init__``,
    and the collapsed locals ``cs``/``n``/``b`` re-separated.)
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols).

    Fix: def name restored from its uses in ``bpe``; parameter name restored
    from the body.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level-BPE tokenizer for LED (derived from the BART tokenizer).

    Fix: the obfuscated header inherited the undefined name
    ``__lowerCAmelCase`` and bound all four class attributes to that same
    name; the base class is grounded by the ``PreTrainedTokenizer`` import
    above and the attribute names by the constants they are assigned.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ):
"""simple docstring"""
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_ ):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_ )
UpperCamelCase = """ """.join(lowerCamelCase_ )
UpperCamelCase = word
return word
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_ ):
UpperCamelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = """""".join(lowerCamelCase_ )
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCamelCase = 0
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCamelCase = """ """ + text
return (text, kwargs)
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ):
"""simple docstring"""
UpperCamelCase = super()._pad(
encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 343 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Ordered (parlai_substring, hf_substring) replacement pairs applied to every
# checkpoint key when converting ParlAI Blenderbot weights to HF names.
# NOTE(review): this list rebinds the same name as the logger above, and the
# rename helper below reads it as `PATTERNS` -- the intended names appear to
# have been mangled; confirm against the upstream conversion script.
_SCREAMING_SNAKE_CASE = [
    ["""attention""", """attn"""],
    ["""encoder_attention""", """encoder_attn"""],
    ["""q_lin""", """q_proj"""],
    ["""k_lin""", """k_proj"""],
    ["""v_lin""", """v_proj"""],
    ["""out_lin""", """out_proj"""],
    ["""norm_embeddings""", """layernorm_embedding"""],
    ["""position_embeddings""", """embed_positions"""],
    ["""embeddings""", """embed_tokens"""],
    ["""ffn.lin""", """fc"""],
]
def lowercase( UpperCamelCase_ ):
    """Translate one ParlAI state-dict key into its Hugging Face equivalent.

    BUG FIX: the original read an undefined local ``k`` (the parameter was
    renamed away from it); bind it explicitly.  The bogus ``-> List[str]``
    annotation (``List`` is not imported here) was dropped.
    """
    k = UpperCamelCase_
    if k == "embeddings.weight":
        return "shared.weight"
    # NOTE(review): `PATTERNS` is the upstream name of the substitution table
    # defined above, whose binding was mangled -- confirm before running.
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
def lowercase( UpperCamelCase_ ):
    """Rename, in place, the four ``layernorm_embedding`` keys of a state dict
    to ``layer_norm`` (Blenderbot-3B checkpoint layout).

    BUG FIX: the original read undefined locals ``sd``/``keys``/``new_k``;
    the bogus ``-> Tuple`` annotation (``Tuple`` is not imported) was dropped.
    """
    sd = UpperCamelCase_
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd  # renamed key must not collide with an existing one
        sd[new_k] = v
# Checkpoint keys dropped entirely during conversion (ParlAI bookkeeping entry).
# NOTE(review): the conversion function reads this as `IGNORE_KEYS`; the name
# appears to have been mangled -- confirm.
_SCREAMING_SNAKE_CASE = ["""START"""]
@torch.no_grad()
def lowercase( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    """Convert a ParlAI Blenderbot checkpoint into a saved HF model directory.

    BUG FIX: the original declared all three parameters under one name
    (SyntaxError) and read undefined locals (``model``, ``sd``, ``cfg``, ``m``,
    ``valid_keys``, ``failures``, ``mapping``, ``new_k``); restored the
    upstream names evident from the body's reads.
    """
    # NOTE(review): `rename_state_dict_key`, `rename_layernorm_keys` and
    # `IGNORE_KEYS` are the upstream names of the helpers defined above, whose
    # bindings were mangled in this source -- confirm before running.
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: convert a ParlAI Blenderbot checkpoint to HF format.
    # NOTE(review): the parser is bound to `_SCREAMING_SNAKE_CASE` but then read
    # as `parser`/`args` -- names appear mangled; confirm against upstream.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
    parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
    parser.add_argument(
        """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
    )
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 343 | import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_SCREAMING_SNAKE_CASE = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def lowercase( ):
    """Return a random (hand, other, expected) triple drawn from SORTED_HANDS.

    ``expected`` follows from the indices: a later index in SORTED_HANDS is the
    stronger hand.  BUG FIX: the original drew from an undefined name and
    carried a bogus ``-> Dict`` annotation (``Dict`` is not imported).
    """
    # NOTE(review): SORTED_HANDS is the upstream name of the fixture tuple at
    # module top, whose binding was mangled -- confirm it exists under this name.
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected

# Parametrized tests below refer to this generator by its upstream name.
generate_random_hand = lowercase
def lowercase( UpperCamelCase_ = 100 ):
    """Return a lazy generator of ``UpperCamelCase_`` random fixture triples.

    BUG FIX: dropped the bogus ``-> List[Any]`` annotation (``List`` is not
    imported here, so the original raised NameError at definition time).
    """
    return (generate_random_hand() for _ in range(UpperCamelCase_ ))

# Parametrize decorators below refer to this factory by its upstream name.
generate_random_hands = lowercase
# NOTE(review): every `parametrize` below passes `UpperCamelCase_`, which is
# undefined at module scope, and each test's duplicate parameter names are a
# SyntaxError; the decorators presumably intended the fixture tuples above
# (TEST_FLUSH, TEST_STRAIGHT, TEST_FIVE_HIGH_STRAIGHT, TEST_KIND, TEST_TYPES,
# TEST_COMPARE) -- confirm against the upstream test file.
# Flush detection per fixture hand.
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
    '''simple docstring'''
    assert PokerHand(UpperCamelCase_ )._is_flush() == expected
# Straight detection per fixture hand.
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
    '''simple docstring'''
    assert PokerHand(UpperCamelCase_ )._is_straight() == expected
# Five-high (wheel) straight detection plus the resulting card values.
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
    '''simple docstring'''
    UpperCamelCase = PokerHand(UpperCamelCase_ )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
# Pair / trips / quads classification.
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
    '''simple docstring'''
    assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected
# Overall hand-type score.
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
    '''simple docstring'''
    assert PokerHand(UpperCamelCase_ )._hand_type == expected
# Hand-vs-hand comparison against the fixed fixtures.
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
    '''simple docstring'''
    assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
# Hand-vs-hand comparison against randomly generated pairs.
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
    '''simple docstring'''
    assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
def lowercase( ):
    """Shuffling and re-sorting all fixture hands must restore SORTED_HANDS order.

    BUG FIX: the original sorted an undefined name and read undefined locals;
    restored the upstream local names (``poker_hands``/``list_copy``) and
    dropped the bogus ``-> Dict`` annotation.
    """
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def lowercase( ) -> Union[str, Any]:
'''simple docstring'''
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=UpperCamelCase_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase( ):
    """Repeated _is_five_high_straight() calls must not mutate the hand.

    BUG FIX: the original assigned every value to a throwaway local and then
    read the undefined names ``pokerhand``/``expected``/``expected_card_values``.
    """
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def lowercase( ):
    """Project Euler problem 54: player 1 must win exactly 376 hands.

    BUG FIX: the original took the dirname of an undefined name instead of
    ``__file__`` and read undefined locals for the hands and result.
    """
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , """poker_hands.txt""" )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            # Each line is two 14-char hands separated by a space.
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Public import structure consumed by _LazyModule at the bottom; heavyweight
# pieces are added only when their optional dependencies are available.
# NOTE(review): in the else-branches below the submodule lists are rebound to
# the same top-level name instead of being inserted into this dict, and the
# final _LazyModule(...) call reads `_import_structure`, which is never
# defined -- mechanical renaming damage; confirm against the upstream
# owlvit/__init__.py.
_SCREAMING_SNAKE_CASE = {
    """configuration_owlvit""": [
        """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """OwlViTConfig""",
        """OwlViTOnnxConfig""",
        """OwlViTTextConfig""",
        """OwlViTVisionConfig""",
    ],
    """processing_owlvit""": ["""OwlViTProcessor"""],
}
# Vision-dependent processors.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = ["""OwlViTFeatureExtractor"""]
    _SCREAMING_SNAKE_CASE = ["""OwlViTImageProcessor"""]
# Torch-dependent model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """OwlViTModel""",
        """OwlViTPreTrainedModel""",
        """OwlViTTextModel""",
        """OwlViTVisionModel""",
        """OwlViTForObjectDetection""",
    ]
# Under static type checking, perform the real imports so names resolve.
if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
# At runtime, replace this module with a lazy loader.
else:
    import sys
    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 343 | import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Pretrained checkpoint name -> hosted config URL.
# NOTE(review): this dict rebinds the logger's name, and the class below logs
# via `logger`, which this source never defines -- names appear mangled.
_SCREAMING_SNAKE_CASE = {
    """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
    """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
    """Configuration class storing the hyper-parameters of an XLNet model.

    BUG FIXES (restored from the mangled original): the base class referenced
    an undefined name (``PretrainedConfig`` is what this module imports); all
    four class attributes were bound to one name, shadowing each other; the
    ``__init__`` signature reused a single parameter name for every argument
    (a SyntaxError) while the body read the upstream parameter names; and the
    property was renamed away from the ``max_position_embeddings`` its own
    setter decorator refers to.  Names/defaults follow upstream XLNetConfig.
    """

    model_type = """xlnet"""
    # `mems` holds cached state, not a comparable model output.
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",  # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , vocab_size=3_2000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.0_2 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """Build the config; ``d_model`` must be divisible by ``n_head``."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
                """ instead.""" , FutureWarning , )
            use_mems_eval = kwargs["""use_cache"""]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @property
    def max_position_embeddings( self ):
        """XLNet has no fixed sequence-length limit; -1 signals "unlimited"."""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        # Settable in the base class, but meaningless for XLNet -- reject explicitly.
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 343 | 1 |
from __future__ import annotations
def lowercase( input_list , low , mid , high ) -> list:
    """Merge the adjacent sorted runs input_list[low:mid] and
    input_list[mid:high + 1] back into input_list, in place, and return it.

    BUG FIXES: the original declared all four parameters under one name
    (a SyntaxError) while the body read the upstream names, and it assigned
    the merged run to a throwaway local instead of writing it back.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop from whichever run has the smaller head (ties go left -> stable).
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list

# Later code in this module refers to this helper as `merge`.
merge = lowercase
def lowercase( UpperCamelCase_ ) -> list:
    """Sort a sequence with a bottom-up (iterative) merge sort and return the
    sorted list; the caller's sequence is not modified.

    BUG FIXES: the original called an undefined name ``merge`` (the helper
    above lost that binding) and assigned loop bounds to throwaway locals
    while reading ``low``/``high``/``mid``/``input_list``.  A private in-place
    merge helper makes this function self-contained.
    """

    def _merge( lst , low , mid , high ):
        # Merge the sorted runs lst[low:mid] and lst[mid:high + 1] in place.
        merged = []
        left, right = lst[low:mid], lst[mid : high + 1]
        while left and right:
            merged.append((left if left[0] <= right[0] else right).pop(0 ) )
        lst[low : high + 1] = merged + left + right
        return lst

    if len(UpperCamelCase_ ) <= 1:
        return UpperCamelCase_
    input_list = list(UpperCamelCase_ )
    # Double the run width p each pass, merging adjacent runs of width p // 2.
    p = 2
    while p <= len(input_list ):
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = _merge(input_list , low , mid , high )
        # Final merge of the last two (possibly unequal) parts.
        if p * 2 >= len(input_list ):
            mid = i
            input_list = _merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    # Read a comma-separated list from stdin, sort it, and print the result.
    # NOTE(review): the input is bound to `_SCREAMING_SNAKE_CASE` but then read
    # as `user_input`/`unsorted`, and `iter_merge_sort` is never defined under
    # that name -- mechanical renaming damage; confirm against upstream.
    _SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        _SCREAMING_SNAKE_CASE = []
    else:
        _SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")]
    print(iter_merge_sort(unsorted))
| 343 | import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the shared test_module helpers importable for the dynamic-class tests.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
# Fixture locations used by the tests below.
# NOTE(review): all three paths are bound to the same name, each shadowing the
# previous one; the test bodies read them via yet another undefined name --
# confirm the intended constants against the upstream test file.
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    # Behavioural tests for AutoFeatureExtractor.from_pretrained: hub ids,
    # local dirs, error messages, trust_remote_code handling and custom-class
    # registration.
    # NOTE(review): several bodies read `lowerCamelCase_` although the method
    # takes no such parameter, and assertRaisesRegex / register receive it as
    # the expected-exception/class argument.  These presumably refer to the
    # fixture-path constants and custom classes imported above (names mangled);
    # confirm against the upstream test file before relying on these tests.
    def lowerCamelCase_ ( self : Any ):
        """simple docstring"""
        # Body assigns to a throwaway local; intent unclear from this view.
        UpperCamelCase = 0
    def lowerCamelCase_ ( self : str ):
        """simple docstring"""
        # Loading by hub model id should yield a feature extractor instance.
        UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """simple docstring"""
        # Loading from a local path/config fixture.
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : int ):
        """simple docstring"""
        # config.json alone (without feature_extractor_type) must be enough to
        # resolve the right feature-extractor class from the model config.
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
            # save in new folder
            model_config.save_pretrained(lowerCamelCase_ )
            config.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
            # make sure private variable is not incorrectly saved
            UpperCamelCase = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """simple docstring"""
        # Loading directly from a standalone feature-extractor config file.
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """simple docstring"""
        # Invalid model identifier must raise with a helpful message.
        with self.assertRaisesRegex(
            lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
    def lowerCamelCase_ ( self : Dict ):
        """simple docstring"""
        # Invalid revision must raise with a helpful message.
        with self.assertRaisesRegex(
            lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
    def lowerCamelCase_ ( self : List[str] ):
        """simple docstring"""
        # Repos without a preprocessor_config.json must raise.
        with self.assertRaisesRegex(
            lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """simple docstring"""
        # Remote code must be rejected unless trust_remote_code is enabled,
        # and a remote-code extractor must survive a save/reload round trip.
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
    def lowerCamelCase_ ( self : List[str] ):
        """simple docstring"""
        # Custom config/extractor registration: double registration of an
        # existing mapping must raise; registered classes must round-trip.
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase_ ):
                AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCamelCase_ )
                UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
                self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def lowerCamelCase_ ( self : Any ):
        """simple docstring"""
        # When a local class is registered for a repo that also ships remote
        # code, trust_remote_code selects which implementation wins.
        class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
            __lowerCAmelCase = True
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # If remote code is not set, the default is to use local
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 343 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def __init__( self : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str=13 , lowerCamelCase_ : str=7 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : Any=True , lowerCamelCase_ : Union[str, Any]=99 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Any=0.0_2 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Union[str, Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Run the base model forward and check the output hidden-state shape.

    Bug fix: the mangled signature duplicated ``lowerCamelCase_`` for all
    parameters (SyntaxError) and the body referenced it ambiguously; names are
    restored from the sibling ``prepare_config_and_inputs`` tuple and the call
    site ``create_and_check_distilbert_model(*...)``.
    """
    model = DistilBertModel(config=config)
    model.to(torch_device)  # torch_device comes from transformers.testing_utils
    model.eval()
    result = model(input_ids, input_mask)
    result = model(input_ids)  # also exercise the mask-less path
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the masked-LM head's logits shape.

    Bug fix: duplicate ``lowerCamelCase_`` parameter names (SyntaxError) restored
    to the names used by the shared inputs tuple.
    """
    model = DistilBertForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check start/end logits shapes for the question-answering head.

    Bug fix: duplicate ``lowerCamelCase_`` parameter names (SyntaxError) restored
    to the names used by the shared inputs tuple.
    """
    model = DistilBertForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the sequence-classification head's logits shape.

    Bug fix: duplicate ``lowerCamelCase_`` parameter names (SyntaxError); the
    first mangled assignment was ``config.num_labels = self.num_labels`` per the
    parallel heads in this tester.
    """
    config.num_labels = self.num_labels
    model = DistilBertForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the token-classification head's logits shape.

    Bug fix: duplicate ``lowerCamelCase_`` parameter names (SyntaxError) restored
    to the names used by the shared inputs tuple.
    """
    config.num_labels = self.num_labels
    model = DistilBertForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the multiple-choice head: inputs are tiled per choice, logits are (batch, num_choices).

    Bug fix: duplicate ``lowerCamelCase_`` parameter names (SyntaxError) restored
    to the names used by the shared inputs tuple.
    """
    config.num_choices = self.num_choices
    model = DistilBertForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    # Tile ids/mask to (batch, num_choices, seq) as the multiple-choice head expects.
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) for the shared ModelTesterMixin checks.

    Bug fix: all tuple elements were unpacked into a single repeated
    ``UpperCamelCase`` name; restored from the tuple returned by
    ``prepare_config_and_inputs``.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    # DistilBert model/pipeline test suite (ModelTesterMixin-style).
    # NOTE(review): identifiers appear machine-mangled — the `__lowerCAmelCase`
    # base names are unresolved placeholders (presumably ModelTesterMixin and
    # PipelineTesterMixin — TODO confirm), the repeated `__lowerCAmelCase = ...`
    # class attributes shadow one another so only the last assignment survives,
    # and `lowerCamelCase_` is referenced in method bodies without ever being
    # bound.  The code is kept byte-identical here; only comments were added.

    # presumably `all_model_classes` — TODO confirm against upstream
    __lowerCAmelCase = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # presumably `pipeline_model_mapping` — TODO confirm against upstream
    __lowerCAmelCase = (
        {
            """feature-extraction""": DistilBertModel,
            """fill-mask""": DistilBertForMaskedLM,
            """question-answering""": DistilBertForQuestionAnswering,
            """text-classification""": DistilBertForSequenceClassification,
            """token-classification""": DistilBertForTokenClassification,
            """zero-shot""": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # four boolean feature flags (e.g. fx/torchscript/pruning/resize-embeddings);
    # their distinct names were collapsed by the mangling — TODO confirm
    __lowerCAmelCase = True
    __lowerCAmelCase = True
    __lowerCAmelCase = True
    __lowerCAmelCase = True

    def lowerCamelCase_ ( self : Dict ):
        """Set up the model tester and the shared config tester."""
        UpperCamelCase = DistilBertModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , dim=37 )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self : Tuple ):
        """Forward-shape test for the base model."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Forward-shape test for the masked-LM head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Any ):
        """Forward-shape test for the question-answering head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        """Forward-shape test for the sequence-classification head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] ):
        """Forward-shape test for the token-classification head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Any ):
        """Forward-shape test for the multiple-choice head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : str ):
        """Smoke-test from_pretrained on the first published checkpoint."""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = DistilBertModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )

    @slow
    @require_torch_gpu
    def lowerCamelCase_ ( self : Any ):
        """Trace each model with TorchScript on CPU, save, reload onto GPU and run it."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            UpperCamelCase = True
            UpperCamelCase = model_class(config=lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = torch.jit.trace(
                lowerCamelCase_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """traced_model.pt""" ) )
                UpperCamelCase = torch.jit.load(os.path.join(lowerCamelCase_ , """traced_model.pt""" ) , map_location=lowerCamelCase_ )
                loaded(inputs_dict["""input_ids"""].to(lowerCamelCase_ ) , inputs_dict["""attention_mask"""].to(lowerCamelCase_ ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    # Slow integration test: compares real-checkpoint hidden states against
    # hard-coded golden values.
    # NOTE(review): locals were mangled to `UpperCamelCase` and later lines read
    # `model`/`output`/`lowerCamelCase_` which are never bound as written;
    # code kept byte-identical, comments only.

    @slow
    def lowerCamelCase_ ( self : Any ):
        """Check distilbert-base-uncased hidden states on a fixed 11-token input."""
        UpperCamelCase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
        UpperCamelCase = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        # Golden 3x3 slice of the hidden states, compared with atol=1e-4.
        UpperCamelCase = torch.tensor(
            [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase( UpperCamelCase_ ):
    """Return True if codepoint *UpperCamelCase_* is a "chinese character".

    This defines a "chinese character" as anything in the CJK Unicode block:
    https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)

    Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    despite its name. The modern Korean Hangul alphabet is a different block,
    as is Japanese Hiragana and Katakana. Those alphabets are used to write
    space-separated words, so they are not treated specially and handled
    like all of the other languages.
    """
    # Bug fix: the body tested an undefined name `cp`; use the parameter.
    if (
        (UpperCamelCase_ >= 0x4E00 and UpperCamelCase_ <= 0x9FFF)
        or (UpperCamelCase_ >= 0x3400 and UpperCamelCase_ <= 0x4DBF)  #
        or (UpperCamelCase_ >= 0x2_0000 and UpperCamelCase_ <= 0x2_A6DF)  #
        or (UpperCamelCase_ >= 0x2_A700 and UpperCamelCase_ <= 0x2_B73F)  #
        or (UpperCamelCase_ >= 0x2_B740 and UpperCamelCase_ <= 0x2_B81F)  #
        or (UpperCamelCase_ >= 0x2_B820 and UpperCamelCase_ <= 0x2_CEAF)  #
        or (UpperCamelCase_ >= 0xF900 and UpperCamelCase_ <= 0xFAFF)
        or (UpperCamelCase_ >= 0x2_F800 and UpperCamelCase_ <= 0x2_FA1F)  #
    ):
        return True
    return False


# Later call sites in this module refer to this helper by its original name.
_is_chinese_char = lowercase
def lowercase( UpperCamelCase_ ):
    """Return 1 if every character of *UpperCamelCase_* is a CJK char, else 0.

    Works for a word like '180' or '身高' or '神'.
    """
    # Bug fix: the loop iterated an undefined name `word`; use the parameter.
    for char in UpperCamelCase_:
        if not _is_chinese_char(ord(char)):
            return 0
    return 1


# Later call sites in this module refer to this helper by its original name.
is_chinese = lowercase
def lowercase( UpperCamelCase_ ):
    """Return the distinct multi-character Chinese words found in *UpperCamelCase_* (a token list)."""
    # Bug fix: the body read undefined names (`tokens`, `word_set`) left over
    # from an inconsistent rename; rebuilt with coherent locals.
    word_set = set()
    for token in UpperCamelCase_:
        # only multi-character, all-Chinese tokens count as "words"
        if len(token) > 1 and is_chinese(token):
            word_set.add(token)
    return list(word_set)


# Later call sites in this module refer to this helper by its original name.
get_chinese_word = lowercase
def lowercase(bert_tokens, chinese_word_set):
    """Prefix '##' onto WordPiece tokens that continue a whole Chinese word.

    Scans *bert_tokens* left to right; whenever a run of tokens joins into a
    word from *chinese_word_set*, every token after the first gets the '##'
    continuation marker (whole-word-masking reference format).

    Bug fix: the mangled signature named both parameters ``lowerCamelCase_``
    (a duplicate-argument SyntaxError) and the body mixed undefined names;
    parameter names are restored from the body's own references
    (``bert_tokens``, ``chinese_word_set``).
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest possible word first
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


# Later call sites in this module refer to this helper by its original name.
add_sub_symbol = lowercase
def lowercase(lines, ltp_tokenizer, bert_tokenizer):
    """Compute whole-word-masking reference positions for each input line.

    For every line, segment it with LTP to find Chinese words, tokenize it
    with the BERT tokenizer, and record the indices of '##'-continuation
    subword tokens that are single Chinese characters.

    Bug fix: the mangled signature named all three parameters
    ``lowerCamelCase_`` (a duplicate-argument SyntaxError) and the body read
    undefined names; parameter names are restored from the body's own
    references (``lines``, ``ltp_tokenizer``, ``bert_tokenizer``).
    """
    ltp_res = []
    for i in range(0, len(lines), 100):  # batch LTP segmentation by 100 lines
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):  # batch BERT tokenization by 100 lines
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


# Later call sites in this module refer to this helper by its original name.
prepare_ref = lowercase
def lowercase( UpperCamelCase_ ):
    """Generate and save whole-word-masking reference ids from parsed CLI args.

    For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext
    (https://github.com/ymcui/Chinese-BERT-wwm).  If we want to fine-tune these
    models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp).

    Bug fix: the body read an undefined ``args`` instead of the parameter, and
    the line filter tested ``len(<arg>)`` instead of ``len(line)``.
    """
    args = UpperCamelCase_
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


# The CLI guard below refers to this entry point by its original name.
main = lowercase
if __name__ == "__main__":
    # CLI entry point for the whole-word-masking reference generator.
    # Bug fix: the mangled version assigned both the parser and the parsed args
    # to `_SCREAMING_SNAKE_CASE` while the following lines read the undefined
    # names `parser` and `args`; coherent local names restored.
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
    )
    parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
    parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
    # `lowercase` is the (obfuscated) name of the entry point defined above.
    lowercase(args)
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_SCREAMING_SNAKE_CASE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class SCREAMING_SNAKE_CASE_ :
    # Autoformer tester harness: builds tiny configs and synthetic time-series
    # batches, and checks that encoder/decoder survive a save/load round trip.
    # NOTE(review): identifiers appear machine-mangled — every parameter is named
    # `lowerCamelCase_` (a duplicate-argument SyntaxError as written) and locals
    # collapse to `UpperCamelCase`, while later lines read the intended names
    # (`d_model`, `config`, `transformer_inputs`, ...).  Code is kept
    # byte-identical; only comments/docstrings were added.

    def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : List[str]=14 , lowerCamelCase_ : Optional[Any]=10 , lowerCamelCase_ : Tuple=19 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Dict=4 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[str]=16 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Optional[int]=4 , lowerCamelCase_ : str=4 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : str=[1, 2, 3, 4, 5] , lowerCamelCase_ : Any=25 , lowerCamelCase_ : Any=5 , ):
        """Store tester hyper-parameters (sizes, lags, Autoformer-specific knobs).

        NOTE(review): the `[1, 2, 3, 4, 5]` default is a mutable default
        argument — shared across calls if ever mutated.
        """
        UpperCamelCase = d_model
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = prediction_length
        UpperCamelCase = context_length
        UpperCamelCase = cardinality
        UpperCamelCase = num_time_features
        UpperCamelCase = lags_sequence
        UpperCamelCase = embedding_dimension
        UpperCamelCase = is_training
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        # encoder sees context_length steps; decoder sees prediction + label steps
        UpperCamelCase = context_length
        UpperCamelCase = prediction_length + label_length
        UpperCamelCase = label_length
        UpperCamelCase = moving_average
        UpperCamelCase = autocorrelation_factor

    def lowerCamelCase_ ( self : List[Any] ):
        """Return an AutoformerConfig mirroring the tester's hyper-parameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any ):
        """Build a dict of random past/future tensors sized from the config."""
        # past window must cover context_length plus the largest lag
        UpperCamelCase = config.context_length + max(config.lags_sequence )
        UpperCamelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        UpperCamelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        UpperCamelCase = floats_tensor([self.batch_size, _past_length] )
        UpperCamelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        UpperCamelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        UpperCamelCase = floats_tensor([self.batch_size, config.prediction_length] )
        UpperCamelCase = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict

    def lowerCamelCase_ ( self : int ):
        """Return (config, inputs_dict) built from get_config()."""
        UpperCamelCase = self.get_config()
        UpperCamelCase = self.prepare_autoformer_inputs_dict(lowerCamelCase_ )
        return config, inputs_dict

    def lowerCamelCase_ ( self : List[Any] ):
        """Alias used by the shared mixin tests."""
        UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
        """Round-trip encoder and decoder through save_pretrained/from_pretrained
        and assert their outputs match the full model's to within 1e-3."""
        UpperCamelCase = AutoformerModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
        UpperCamelCase = model(**lowerCamelCase_ )
        UpperCamelCase = outputs.encoder_last_hidden_state
        UpperCamelCase = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = model.get_encoder()
            encoder.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoformerEncoder.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = model.create_network_inputs(**lowerCamelCase_ )
        UpperCamelCase , UpperCamelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        # encoder input = context-window values concatenated with their features
        UpperCamelCase = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        UpperCamelCase = encoder(inputs_embeds=lowerCamelCase_ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        # decoder seasonal/trend initialization mirrors AutoformerModel.forward
        UpperCamelCase = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        UpperCamelCase = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        UpperCamelCase = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        UpperCamelCase = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = model.get_decoder()
            decoder.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoformerDecoder.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
        UpperCamelCase = decoder(
            trend=lowerCamelCase_ , inputs_embeds=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    # Autoformer model-test suite (ModelTesterMixin-style).
    # NOTE(review): identifiers appear machine-mangled — the `__lowerCAmelCase`
    # base names and repeated class attributes are unresolved placeholders
    # (presumably ModelTesterMixin/PipelineTesterMixin and attributes such as
    # all_model_classes / all_generative_model_classes / pipeline_model_mapping
    # plus boolean feature flags — TODO confirm), and `lowerCamelCase_` is read
    # in bodies without being bound.  Code kept byte-identical; comments only.
    __lowerCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    __lowerCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
    __lowerCAmelCase = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Optional[int] ):
        """Set up the model tester and the shared config tester."""
        UpperCamelCase = AutoformerModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Save/load each model class and assert nothing is reported missing."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCamelCase_ )
                UpperCamelCase , UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ , output_loading_info=lowerCamelCase_ )
            self.assertEqual(info["""missing_keys"""] , [] )

    def lowerCamelCase_ ( self : List[str] ):
        """Delegate the encoder/decoder standalone round-trip check to the tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase_ )

    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Skipped: resize-embeddings does not apply to time-series models."""
        pass

    def lowerCamelCase_ ( self : Dict ):
        """Check that main_input_name matches the first forward() argument."""
        UpperCamelCase = inspect.signature(getattr(lowerCamelCase_ , """forward""" ) )
        # The main input is the name of the argument after `self`
        UpperCamelCase = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] ):
        """Check the forward() signature exposes the expected argument names in order."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )
            self.assertListEqual(arg_names[: len(lowerCamelCase_ )] , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Check encoder/decoder/cross attention outputs: counts, shapes, and ordering."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = True
        UpperCamelCase = getattr(self.model_tester , """seq_length""" , lowerCamelCase_ )
        UpperCamelCase = getattr(self.model_tester , """decoder_seq_length""" , lowerCamelCase_ )
        UpperCamelCase = getattr(self.model_tester , """encoder_seq_length""" , lowerCamelCase_ )
        UpperCamelCase = getattr(self.model_tester , """d_model""" , lowerCamelCase_ )
        UpperCamelCase = getattr(self.model_tester , """num_attention_heads""" , lowerCamelCase_ )
        UpperCamelCase = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            UpperCamelCase = True
            UpperCamelCase = False
            UpperCamelCase = True
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            UpperCamelCase = True
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_attentions
            self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            # expected number of output entries, adjusted for optional fields
            UpperCamelCase = len(lowerCamelCase_ )
            UpperCamelCase = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
            # decoder attentions
            UpperCamelCase = outputs.decoder_attentions
            self.assertIsInstance(lowerCamelCase_ , (list, tuple) )
            self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            UpperCamelCase = outputs.cross_attentions
            self.assertIsInstance(lowerCamelCase_ , (list, tuple) )
            self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            UpperCamelCase = True
            UpperCamelCase = True
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            self.assertEqual(out_len + 2 , len(lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Marked flaky upstream; defer to the shared retain-grad test."""
        super().test_retain_grad_hidden_states_attentions()
def lowercase( UpperCamelCase_="train-batch.pt" ):
    """Download a pickled test batch from the HF Hub dataset and load it.

    Bug fix: ``torch.load`` was previously called on the raw *filename* (not the
    downloaded path) with ``map_location`` set to that same filename string,
    which is not a valid device/map spec.  Load the downloaded file and map
    tensors onto ``torch_device`` instead.
    """
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=UpperCamelCase_ , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch


# The integration tests below call this helper by its original name.
prepare_batch = lowercase
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    # Slow integration tests for the published Autoformer tourism-monthly
    # checkpoint: forward pass, encoder-only pass, and generation, each
    # compared against hard-coded golden values.
    # NOTE(review): locals were mangled to `UpperCamelCase` and later lines read
    # `model`/`batch`/`output`/`lowerCamelCase_` which are never bound as
    # written; `prepare_batch` refers to the helper defined just above (whose
    # def was renamed by the mangling).  Code kept byte-identical; comments only.

    def lowerCamelCase_ ( self : str ):
        """Full forward pass on a cached training batch; check shape and a 3x3 slice."""
        UpperCamelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase_ )
        UpperCamelCase = prepare_batch()
        with torch.no_grad():
            UpperCamelCase = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        UpperCamelCase = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCamelCase_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )

    def lowerCamelCase_ ( self : List[str] ):
        """Encoder-only pass on a cached validation batch; check shape and a 3x3 slice."""
        UpperCamelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase_ )
        UpperCamelCase = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            UpperCamelCase = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        UpperCamelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCamelCase_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )

    def lowerCamelCase_ ( self : List[str] ):
        """Run generate() on a validation batch; check sample shape and the mean prediction tail."""
        UpperCamelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase_ )
        UpperCamelCase = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            UpperCamelCase = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        UpperCamelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCamelCase_ )
        UpperCamelCase = outputs.sequences.mean(dim=1 )
        # sampling-based generation: compare with a loose relative tolerance
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase_ , rtol=1E-1 ) )
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def __init__(
    self,
    parent,
    batch_size=13,
    image_size=30,
    patch_size=2,
    num_channels=3,
    is_training=True,
    use_labels=True,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    type_sequence_label_size=10,
    initializer_range=0.02,
):
    """Store the tester hyper-parameters used to build tiny Flax ViT configs/inputs.

    Bug fix: the mangled signature repeated ``lowerCamelCase_`` for every
    parameter (a duplicate-argument SyntaxError) and the body read undefined
    names while assigning to bare ``UpperCamelCase``; names restored from the
    body's right-hand sides.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range

    # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
    num_patches = (image_size // patch_size) ** 2
    self.seq_length = num_patches + 1
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (self.image_size, self.image_size)
UpperCamelCase = (self.patch_size, self.patch_size)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) ,
) = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Flax ViT tests: config checks, forward shapes, call signature, jit parity,
    and (slow) loading of a pretrained checkpoint.

    NOTE(review): the base class ``__lowerCAmelCase`` and the helper name
    ``FlaxViTModelTester`` referenced below are obfuscation artifacts —
    presumably ``FlaxModelTesterMixin`` and the tester class above; confirm
    against the original module.
    """

    # Model classes exercised by the common mixin (empty when flax is unavailable).
    __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def lowerCamelCase_ ( self : Optional[int] ):
        """Set up the model tester and the config tester."""
        UpperCamelCase = FlaxViTModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )

    def lowerCamelCase_ ( self : List[Any] ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Check the base model's output shapes."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Check the image-classification head's output shapes."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Any ):
        """Verify each model's ``__call__`` takes ``pixel_values`` as its first argument."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )

    def lowerCamelCase_ ( self : str ):
        """Check that jitted and non-jitted forward passes produce outputs of equal shape."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
                UpperCamelCase = model_class(lowerCamelCase_ )

                @jax.jit
                def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ):
                    return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ )

                with self.subTest("""JIT Enabled""" ):
                    UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
                self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
                for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Load the pretrained ViT checkpoint and run a dummy forward pass."""
        for model_class_name in self.all_model_classes:
            UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
            UpperCamelCase = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(lowerCamelCase_ )
| 343 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
_SCREAMING_SNAKE_CASE = {"""facebook/blenderbot_small-90M""": 5_1_2}
def lowercase( UpperCamelCase_ ) -> set:
    """Return the set of adjacent symbol pairs in the word ``UpperCamelCase_``.

    The word is a sequence of symbols (e.g. a string or tuple of sub-tokens);
    each consecutive pair is collected once, as required by the BPE merge loop
    in the tokenizer below.

    Fixes over the obfuscated original: the body referenced the undefined
    names ``word`` and ``pairs`` (NameError at call time), and the return
    annotation used ``Any``, which is not imported in this module.
    """
    pairs = set()
    prev_char = UpperCamelCase_[0]
    for char in UpperCamelCase_[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Byte-pair-encoding tokenizer for BlenderbotSmall, driven by a vocab JSON
    and a merges file.

    NOTE(review): several signatures below repeat the parameter name
    ``lowerCamelCase_`` (a SyntaxError in Python), and the repeated
    ``UpperCamelCase = ...`` assignments shadow one another instead of binding
    the ``self.encoder`` / ``self.decoder`` / ``self.bpe_ranks`` / ``self.cache``
    attributes that the methods read — the original identifiers must be
    restored before this class can run.
    """

    # Class-level metadata consumed by the PreTrainedTokenizer machinery.
    __lowerCAmelCase = VOCAB_FILES_NAMES
    __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict="__start__" , lowerCamelCase_ : Any="__end__" , lowerCamelCase_ : List[Any]="__unk__" , lowerCamelCase_ : List[str]="__null__" , **lowerCamelCase_ : List[Any] , ):
        """Load the vocab JSON and BPE merges; build encoder/decoder/rank tables."""
        super().__init__(unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ )
        with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
            UpperCamelCase = json.load(lowerCamelCase_ )
        UpperCamelCase = {v: k for k, v in self.encoder.items()}
        with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
            # First line of a merges file is a version header; last split entry is empty.
            UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1]
        UpperCamelCase = [tuple(merge.split() ) for merge in merges]
        UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
        UpperCamelCase = {}

    @property
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Vocabulary size (number of entries in the encoder)."""
        return len(self.encoder )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Return the full vocab, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
        """Apply (cached) BPE to one token: normalise punctuation/newlines, then
        repeatedly merge the lowest-ranked adjacent symbol pair."""
        if token in self.cache:
            return self.cache[token]
        # Separate punctuation and apostrophes so they become standalone symbols.
        UpperCamelCase = re.sub("""([.,!?()])""" , R""" \1""" , lowerCamelCase_ )
        UpperCamelCase = re.sub("""(')""" , R""" \1 """ , lowerCamelCase_ )
        UpperCamelCase = re.sub(R"""\s{2,}""" , """ """ , lowerCamelCase_ )
        if "\n" in token:
            UpperCamelCase = token.replace("""\n""" , """ __newln__""" )
        UpperCamelCase = token.split(""" """ )
        UpperCamelCase = []
        for token in tokens:
            if not len(lowerCamelCase_ ):
                continue
            UpperCamelCase = token.lower()
            UpperCamelCase = tuple(lowerCamelCase_ )
            # Mark the last symbol with </w> so end-of-word merges rank separately.
            UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
            UpperCamelCase = get_pairs(lowerCamelCase_ )
            if not pairs:
                words.append(lowerCamelCase_ )
                continue
            while True:
                # Merge the pair with the lowest merge rank first.
                UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
                if bigram not in self.bpe_ranks:
                    break
                UpperCamelCase , UpperCamelCase = bigram
                UpperCamelCase = []
                UpperCamelCase = 0
                while i < len(lowerCamelCase_ ):
                    try:
                        UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
                        new_word.extend(word[i:j] )
                        UpperCamelCase = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                UpperCamelCase = tuple(lowerCamelCase_ )
                UpperCamelCase = new_word
                if len(lowerCamelCase_ ) == 1:
                    break
                else:
                    UpperCamelCase = get_pairs(lowerCamelCase_ )
            # Join sub-words with the continuation marker and drop the trailing </w>.
            UpperCamelCase = """@@ """.join(lowerCamelCase_ )
            UpperCamelCase = word[:-4]
            UpperCamelCase = word
            words.append(lowerCamelCase_ )
        return " ".join(lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str ):
        """Split text on whitespace and BPE-encode each piece."""
        UpperCamelCase = []
        UpperCamelCase = re.findall(R"""\S+\n?""" , lowerCamelCase_ )
        for token in words:
            split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(""" """ ) ) )
        return split_tokens

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str ):
        """Convert a token (lower-cased) to its vocab id, falling back to the unk id."""
        UpperCamelCase = token.lower()
        return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
        """Convert a vocab id back to its token string."""
        return self.decoder.get(lowerCamelCase_ , self.unk_token )

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
        """Join tokens into a string, stripping the BPE continuation markers."""
        UpperCamelCase = """ """.join(lowerCamelCase_ ).replace("""@@ """ , """""" ).strip()
        return out_string

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
        """Write ``vocab.json`` and ``bpe.codes`` into ``save_directory`` and return their paths."""
        if not os.path.isdir(lowerCamelCase_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCamelCase = os.path.join(
            lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCamelCase = os.path.join(
            lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
        UpperCamelCase = 0
        with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
                if index != token_index:
                    # Merge ranks should be consecutive; warn if the file looks corrupted.
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    UpperCamelCase = token_index
                writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
                index += 1
        return vocab_file, merge_file
| 343 | import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
    """Builds small LiLT configs and random (text + bounding-box) inputs for the
    model tests below.

    NOTE(review): the signatures repeat the parameter name ``lowerCamelCase_``
    (a SyntaxError in Python), and the repeated ``UpperCamelCase = ...``
    assignments shadow one another instead of binding the ``self.*`` attributes
    read by the other methods; the right-hand-side names show the intended
    identifiers.
    """

    def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ):
        """Record the hyper-parameters that size the test model and its inputs."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_input_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        UpperCamelCase = range_bbox

    def lowerCamelCase_ ( self : Dict ):
        """Create random input ids, a legalised bbox tensor, masks and labels, plus a config."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so each box satisfies x0 <= x1 and y0 <= y1.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCamelCase = bbox[i, j, 3]
                    UpperCamelCase = bbox[i, j, 1]
                    UpperCamelCase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCamelCase = bbox[i, j, 2]
                    UpperCamelCase = bbox[i, j, 0]
                    UpperCamelCase = t
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def lowerCamelCase_ ( self : List[Any] ):
        """Build a small ``LiltConfig`` from the stored hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
        """Run ``LiltModel`` with several input combinations and check output shapes."""
        UpperCamelCase = LiltModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ):
        """Run ``LiltForTokenClassification`` and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
        """Run ``LiltForQuestionAnswering`` and check start/end logits shapes."""
        UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCamelCase_ ( self : Dict ):
        """Repackage the prepared config/inputs into the dict form used by the common tester."""
        UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) = config_and_inputs
        UpperCamelCase = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """LiLT model tests wired into the common model / generation / pipeline mixins.

    NOTE(review): the three identical ``__lowerCAmelCase`` base names and the
    repeated ``lowerCamelCase_`` parameters below are obfuscation artifacts
    (repeating a parameter name is a SyntaxError in Python) — the original
    mixin, attribute and parameter names must be restored before running.
    """

    # Model classes run through the common tests (empty without torch).
    __lowerCAmelCase = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    __lowerCAmelCase = (
        {
            """feature-extraction""": LiltModel,
            """question-answering""": LiltForQuestionAnswering,
            """text-classification""": LiltForSequenceClassification,
            """token-classification""": LiltForTokenClassification,
            """zero-shot""": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Two boolean feature flags for the common mixins (both disabled here).
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
        """Unconditionally skip the given pipeline test combination."""
        return True

    def lowerCamelCase_ ( self : List[Any] ):
        """Set up the model tester and the config tester."""
        UpperCamelCase = LiltModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )

    def lowerCamelCase_ ( self : Any ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self : Tuple ):
        """Check the base model's output shapes."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        """Re-run the base model check for each position-embedding type."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase = type
            self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Check the token-classification head's output shapes."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Check the question-answering head's output shapes."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Load the first pretrained checkpoint from the archive list."""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: forwards a tiny input through the pretrained LiLT
    base model and compares a hidden-state slice against reference values."""

    def lowerCamelCase_ ( self : List[str] ):
        """Run SCUT-DLVCLab/lilt-roberta-en-base on a 2-token input with boxes
        and check the output shape and the first three hidden values per token."""
        UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ )
        UpperCamelCase = torch.Size([1, 2, 768] )
        UpperCamelCase = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , )
        # NOTE(review): assertTrue with two arguments does not compare them —
        # presumably assertEqual was intended for the shape check; confirm.
        self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
| 343 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ :
    """Composite RAG tokenizer pairing a question-encoder tokenizer with a
    generator tokenizer and delegating calls to the currently selected one.

    NOTE(review): several signatures below repeat the parameter name
    ``lowerCamelCase_`` (a SyntaxError in Python), and the ``UpperCamelCase = ...``
    assignments bind locals rather than the ``self.question_encoder`` /
    ``self.generator`` / ``self.current_tokenizer`` attributes that the
    methods read — the original identifiers must be restored.
    """

    def __init__( self : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
        """Store both sub-tokenizers; the question encoder starts as current."""
        UpperCamelCase = question_encoder
        UpperCamelCase = generator
        UpperCamelCase = self.question_encoder

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : int ):
        """Save both sub-tokenizers into dedicated sub-directories of ``save_directory``."""
        if os.path.isfile(lowerCamelCase_ ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
        UpperCamelCase = os.path.join(lowerCamelCase_ , """question_encoder_tokenizer""" )
        UpperCamelCase = os.path.join(lowerCamelCase_ , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(lowerCamelCase_ )
        self.generator.save_pretrained(lowerCamelCase_ )

    @classmethod
    def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Any , **lowerCamelCase_ : str ):
        """Load both sub-tokenizers from a pretrained RAG checkpoint (per subfolder)."""
        # Imported here to avoid a circular import with the auto-tokenizer module.
        from ..auto.tokenization_auto import AutoTokenizer

        UpperCamelCase = kwargs.pop("""config""" , lowerCamelCase_ )
        if config is None:
            UpperCamelCase = RagConfig.from_pretrained(lowerCamelCase_ )
        UpperCamelCase = AutoTokenizer.from_pretrained(
            lowerCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        UpperCamelCase = AutoTokenizer.from_pretrained(
            lowerCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=lowerCamelCase_ , generator=lowerCamelCase_ )

    def __call__( self : Union[str, Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : Any ):
        """Delegate encoding to the currently selected sub-tokenizer."""
        return self.current_tokenizer(*lowerCamelCase_ , **lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Dict ):
        """Delegate batch decoding to the generator tokenizer."""
        return self.generator.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ):
        """Delegate decoding to the generator tokenizer."""
        return self.generator.decode(*lowerCamelCase_ , **lowerCamelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Switch the active tokenizer to the question encoder."""
        UpperCamelCase = self.question_encoder

    def lowerCamelCase_ ( self : Tuple ):
        """Switch the active tokenizer to the generator."""
        UpperCamelCase = self.generator

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[List[str]] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "longest" , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = True , **lowerCamelCase_ : int , ):
        """Deprecated helper that tokenizes source texts (and optional targets) in one call."""
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , lowerCamelCase_ , )
        if max_length is None:
            UpperCamelCase = self.current_tokenizer.model_max_length
        UpperCamelCase = self(
            lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , **lowerCamelCase_ , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            UpperCamelCase = self.current_tokenizer.model_max_length
        UpperCamelCase = self(
            text_target=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors=lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , **lowerCamelCase_ , )
        UpperCamelCase = labels["""input_ids"""]
        return model_inputs
| 343 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Holds the image-processor settings used by the PoolFormer image-processing
    tests below and exposes them as a kwargs dict.

    Fixes over the obfuscated original: the ``__init__`` signature repeated the
    parameter name ``lowerCamelCase_`` (a SyntaxError in Python) and assigned
    throwaway locals instead of ``self.*`` attributes.  Parameter names are
    restored from the right-hand sides of those assignments.  Mutable list
    defaults for ``image_mean``/``image_std`` are replaced with ``None``
    sentinels so instances do not share state.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        """Store the test parameters, applying the documented defaults for unset values."""
        self.size = size if size is not None else {"shortest_edge": 30}
        self.crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.crop_pct = crop_pct
        self.do_normalize = do_normalize
        # Build fresh lists per instance to avoid shared mutable defaults.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a PoolFormer image processor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }

    # Backward-compatible alias for the obfuscated method name used elsewhere.
    lowerCamelCase_ = prepare_image_processor_dict
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """PoolFormer image-processor tests: attribute presence, ``from_dict``
    round-trips, and batched preprocessing of PIL, numpy and torch inputs.

    NOTE(review): the obfuscated names (``__lowerCAmelCase``, ``lowerCamelCase_``,
    ``UpperCamelCase``) hide the real mixin and attribute names (e.g.
    ``image_processor_tester``) read by the methods below — confirm against the
    original module; as shown the assignments bind locals only.
    """

    # Image processor class under test (None when vision deps are unavailable).
    __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None

    def lowerCamelCase_ ( self : Any ):
        """Instantiate the settings helper used by every test below."""
        UpperCamelCase = PoolFormerImageProcessingTester(self )

    @property
    def lowerCamelCase_ ( self : int ):
        """Kwargs dict used to construct the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCamelCase_ ( self : int ):
        """The processor exposes the expected configuration attributes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )

    def lowerCamelCase_ ( self : Optional[int] ):
        """``from_dict`` honours defaults and keyword overrides for size/crop_size."""
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
        self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Intentionally empty placeholder kept from the common test template."""
        pass

    def lowerCamelCase_ ( self : Optional[int] ):
        """Preprocess PIL images: single image and batch get the expected shapes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , Image.Image )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Preprocess numpy arrays: single image and batch get the expected shapes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , np.ndarray )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCamelCase_ ( self : List[str] ):
        """Preprocess torch tensors: single image and batch get the expected shapes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
from collections.abc import Callable
def lowercase(function: Callable[[float], float], a: float, b: float) -> float:
    """Find an approximate root of ``function`` in the interval [a, b] by bisection.

    Requires that ``function`` changes sign over [a, b] (or that an endpoint is
    already a root); otherwise a ValueError is raised.  Iterates until the
    half-interval width drops below 1e-7.

    Fixes over the obfuscated original: all three parameters were declared as
    ``UpperCamelCase_`` (repeating a parameter name is a SyntaxError in Python)
    and the body referenced undefined locals ``start``/``end``/``mid`` — the
    identifiers are restored from the body's own references.
    """
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def lowercase( UpperCamelCase_ ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 343 | def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float:
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(UpperCamelCase_ ) * abs(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 343 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_SCREAMING_SNAKE_CASE = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def lowercase( UpperCamelCase_ ) -> str:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(UpperCamelCase_ )
return pairs
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : Optional[int]="</s>" , lowerCamelCase_ : Optional[Any]="<s>" , lowerCamelCase_ : Tuple="<unk>" , lowerCamelCase_ : Any="<pad>" , lowerCamelCase_ : List[Any]="<mask>" , **lowerCamelCase_ : Any , ):
"""simple docstring"""
super().__init__(
bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = vocab_file
UpperCamelCase = merges_file
UpperCamelCase = {}
UpperCamelCase = 0
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
self.add_from_file(lowerCamelCase_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("""\n""" )[:-1]
UpperCamelCase = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = {}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
UpperCamelCase = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_ ):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_ )
UpperCamelCase = """@@ """.join(lowerCamelCase_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
return word
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(R"""\S+\n?""" , lowerCamelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : int ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase_ , self.unk_token )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = """ """.join(lowerCamelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.merges_file , lowerCamelCase_ )
return out_vocab_file, out_merge_file
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
try:
with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCamelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
UpperCamelCase = f.readlines()
for lineTmp in lines:
UpperCamelCase = lineTmp.strip()
UpperCamelCase = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
UpperCamelCase = line[:idx]
UpperCamelCase = len(self.encoder )
| 343 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = """trocr"""
__lowerCAmelCase = ["""past_key_values"""]
__lowerCAmelCase = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = activation_function
UpperCamelCase = max_position_embeddings
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = init_std
UpperCamelCase = decoder_layerdrop
UpperCamelCase = use_cache
UpperCamelCase = scale_embedding
UpperCamelCase = use_learned_position_embeddings
UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 343 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = ort.SessionOptions()
UpperCamelCase = False
return options
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
UpperCamelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """A red cat sitting on a park bench"""
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCamelCase_ , output_type="""np""" , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 343 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
__lowerCAmelCase = """swin"""
__lowerCAmelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(lowerCamelCase_ )
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return 1E-4
| 343 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = """hf-internal-testing/tiny-random-t5"""
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
UpperCamelCase = tokenizer("""This is me""" , return_tensors="""pt""" )
UpperCamelCase = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCamelCase = model.generate(**lowerCamelCase_ )
UpperCamelCase = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCamelCase = model_reloaded.generate(**lowerCamelCase_ )
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = """hf-internal-testing/tiny-random-t5"""
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
UpperCamelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCamelCase_ ):
model.save_pretrained(lowerCamelCase_ )
UpperCamelCase = model.reverse_bettertransformer()
model.save_pretrained(lowerCamelCase_ )
| 343 | import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = """Normal"""
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = """Abnormality detected"""
| 343 | 1 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = PhobertTokenizer
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = ["""#version: 0.2""", """l à</w>"""]
UpperCamelCase = {"""unk_token""": """<unk>"""}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def lowerCamelCase_ ( self : str , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = """Tôi là VinAI Research"""
UpperCamelCase = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase = """Tôi là VinAI Research"""
UpperCamelCase = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
print(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
| 343 | from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
pass
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
def __iter__( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self
UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase_ )
yield node.data
UpperCamelCase = node.next_node
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Node(1)
_SCREAMING_SNAKE_CASE = Node(2)
_SCREAMING_SNAKE_CASE = Node(3)
_SCREAMING_SNAKE_CASE = Node(4)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = root_node.next_node
print(root_node.has_loop) # True
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = Node(1)
print(root_node.has_loop) # False
| 343 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
__lowerCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__lowerCAmelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """The input training data file (a text file)."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
if self.train_file is not None:
UpperCamelCase = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE_ :
__lowerCAmelCase = 42
__lowerCAmelCase = True
__lowerCAmelCase = None
__lowerCAmelCase = None
def __call__( self : List[str] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase = [feature.pop(lowerCamelCase_ ) for feature in features]
UpperCamelCase = len(lowerCamelCase_ )
UpperCamelCase = len(features[0]["""input_ids"""] )
UpperCamelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCamelCase_ )] for feature in features
]
UpperCamelCase = list(chain(*lowerCamelCase_ ) )
UpperCamelCase = self.tokenizer.pad(
lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase = {k: v.view(lowerCamelCase_ , lowerCamelCase_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase = torch.tensor(lowerCamelCase_ , dtype=torch.intaa )
return batch
def lowercase( ) -> int:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , UpperCamelCase_ , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
datasets.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase = {}
if data_args.train_file is not None:
UpperCamelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase = data_args.validation_file
UpperCamelCase = data_args.train_file.split(""".""" )[-1]
UpperCamelCase = load_dataset(
UpperCamelCase_ , data_files=UpperCamelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase = [f"""ending{i}""" for i in range(4 )]
UpperCamelCase = """sent1"""
UpperCamelCase = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCamelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase_ ):
UpperCamelCase = [[context] * 4 for context in examples[context_name]]
UpperCamelCase = examples[question_header_name]
UpperCamelCase = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase_ )
]
# Flatten out
UpperCamelCase = list(chain(*UpperCamelCase_ ) )
UpperCamelCase = list(chain(*UpperCamelCase_ ) )
# Tokenize
UpperCamelCase = tokenizer(
UpperCamelCase_ , UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase = min(len(UpperCamelCase_ ) , data_args.max_train_samples )
UpperCamelCase = train_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase = train_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase = min(len(UpperCamelCase_ ) , data_args.max_eval_samples )
UpperCamelCase = eval_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase = eval_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase_ ):
UpperCamelCase , UpperCamelCase = eval_predictions
UpperCamelCase = np.argmax(UpperCamelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , compute_metrics=UpperCamelCase_ , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase = train_result.metrics
UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase_ )
)
UpperCamelCase = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics("""train""" , UpperCamelCase_ )
trainer.save_metrics("""train""" , UpperCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase = trainer.evaluate()
UpperCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase_ )
UpperCamelCase = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics("""eval""" , UpperCamelCase_ )
trainer.save_metrics("""eval""" , UpperCamelCase_ )
UpperCamelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase_ )
else:
trainer.create_model_card(**UpperCamelCase_ )
def lowercase( UpperCamelCase_ ) -> None:
    """Entry point for ``xla_spawn`` (TPU training).

    ``xla_spawn`` invokes this with the process index as the single
    positional argument; the index is unused because ``main`` parses its
    own arguments.  Fixed the return annotation: the function returns
    nothing (the original declared ``-> Tuple``).
    """
    main()


if __name__ == "__main__":
    main()
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# SageMaker end-to-end test: launches a HuggingFace training job with
# SageMaker model parallelism (smdistributed) and asserts the reported
# KPIs.  Gated on the TEST_SAGEMAKER env var being literally "True".
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    # NOTE(review): identifiers in this class were machine-obfuscated.
    # All methods share the name ``lowerCamelCase_`` (later defs shadow
    # earlier ones), and several names referenced below — ``smp_options``,
    # ``mpi_options``, ``instance_count``, ``name_extension``, ``job_name``,
    # ``estimator``, ``result_metrics_df``, ``train_runtime``,
    # ``eval_accuracy``, ``eval_loss``, ``outfile``,
    # ``self.create_estimator`` — have no visible definition under the
    # obfuscated names.  Verify against the upstream SageMaker test file
    # before executing.
    def lowerCamelCase_ ( self : Tuple ):
        """Setup: copy the ``run_glue.py`` example script into the test path."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , )
        # ``sm_env`` fixture must have attached the environment object.
        assert hasattr(self , """env""" )
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ):
        """Build a HuggingFace SageMaker estimator configured for model
        parallelism; ``lowerCamelCase_`` is the training-job instance count."""
        # MPI side of the smdistributed config: 8 processes per host
        # (one per GPU on a p3dn.24xlarge).
        UpperCamelCase = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        # Model-parallel config: 4 interleaved pipeline partitions with
        # DDP across the remaining ranks.
        UpperCamelCase = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        # Job-name suffix distinguishes the plain-Trainer and SMP-Trainer runs.
        UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , )
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ):
        """Export the CloudWatch metrics of a finished training job to CSV."""
        TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(1,)] )
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
        """Run the training job and assert runtime/accuracy/loss thresholds."""
        UpperCamelCase = self.create_estimator(lowerCamelCase_ )
        # run training
        estimator.fit()
        # result dataframe
        UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCamelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE_ :
    # Fixture builder for the LayoutLMv3 tests: creates a small random
    # config plus matching inputs (token ids, bounding boxes, pixel values,
    # masks, labels) and runs shape checks for each task head.
    # NOTE(review): identifiers were machine-obfuscated — every ``__init__``
    # parameter is named ``lowerCamelCase_`` (duplicate parameter names are
    # a SyntaxError) and every assignment target is ``UpperCamelCase``, so
    # names referenced below (``parent``, ``batch_size``, ``bbox``,
    # ``config``, ``result``, ``t`` …) have no visible definition.  Verify
    # against the upstream test file.
    def __init__( self : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=99 , lowerCamelCase_ : List[str]=36 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Tuple=4 , lowerCamelCase_ : Optional[int]=37 , lowerCamelCase_ : Dict="gelu" , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : Tuple=6 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : Any=None , lowerCamelCase_ : Dict=1000 , ):
        """Store the test hyper-parameters and derive the sequence lengths."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = num_channels
        UpperCamelCase = image_size
        UpperCamelCase = patch_size
        UpperCamelCase = text_seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_input_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = coordinate_size
        UpperCamelCase = shape_size
        UpperCamelCase = num_labels
        UpperCamelCase = num_choices
        UpperCamelCase = scope
        UpperCamelCase = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        UpperCamelCase = text_seq_length
        UpperCamelCase = (image_size // patch_size) ** 2 + 1
        UpperCamelCase = self.text_seq_length + self.image_seq_length
    def lowerCamelCase_ ( self : List[Any] ):
        """Build a random config plus matching model inputs."""
        UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        # Random bounding boxes in [0, range_bbox); coordinates are then
        # swapped where needed so x1 <= x2 and y1 <= y2.
        UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCamelCase = bbox[i, j, 3]
                    UpperCamelCase = bbox[i, j, 1]
                    UpperCamelCase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCamelCase = bbox[i, j, 2]
                    UpperCamelCase = bbox[i, j, 0]
                    UpperCamelCase = t
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        UpperCamelCase = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
        """Forward the base model with text+image, text-only and image-only
        inputs and check the hidden-state shapes."""
        UpperCamelCase = LayoutLMvaModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        # text + image
        UpperCamelCase = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        UpperCamelCase = model(lowerCamelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        UpperCamelCase = model(pixel_values=lowerCamelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : Dict ):
        """Check the sequence-classification head's logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ):
        """Check the token-classification head's logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
        """Check the question-answering head's start/end logits shapes."""
        UpperCamelCase = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowerCamelCase_ ( self : Any ):
        """Repack ``prepare_config_and_inputs`` output into the
        ``(config, inputs_dict)`` form used by the common model tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) = config_and_inputs
        UpperCamelCase = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
# Common LayoutLMv3 model tests (config checks, per-head forward passes,
# slow from_pretrained smoke test), driven by the tester class above.
# NOTE(review): obfuscation collapsed all class attributes into
# ``__lowerCAmelCase`` (later assignments shadow earlier ones — upstream
# these are distinct flags such as ``test_pruning``/``all_model_classes``/
# ``pipeline_model_mapping``), and the mixin base classes were renamed to
# the same identifier.  Verify against the upstream test file.
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __lowerCAmelCase = (
        {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
        """Pipeline-test gate: unconditionally allow all pipeline tests."""
        return True
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Instantiate the model tester and the config tester."""
        UpperCamelCase = LayoutLMvaModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str]=False ):
        """Adapt a shared inputs dict for a given model class, expanding
        tensors for multiple-choice models and adding dummy labels when
        ``return_labels`` is requested."""
        UpperCamelCase = copy.deepcopy(lowerCamelCase_ )
        if model_class in get_values(lowerCamelCase_ ):
            # Multiple-choice models expect an extra num_choices dimension.
            UpperCamelCase = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowerCamelCase_ ):
                UpperCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
            elif model_class in get_values(lowerCamelCase_ ):
                # QA heads need start and end positions.
                UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
                UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
            elif model_class in [
                *get_values(lowerCamelCase_ ),
            ]:
                UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
            elif model_class in [
                *get_values(lowerCamelCase_ ),
            ]:
                # Token classification: one label per text token.
                UpperCamelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
        return inputs_dict
    def lowerCamelCase_ ( self : Dict ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self : Optional[int] ):
        """Base-model forward-pass shape checks."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : List[str] ):
        """Repeat the base-model check for each position-embedding type."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase = type
            self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Any ):
        """Sequence-classification head check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : int ):
        """Token-classification head check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Question-answering head check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
    @slow
    def lowerCamelCase_ ( self : Any ):
        """Smoke test: the first published checkpoint loads successfully."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> "Image.Image":
    """Load and return the standard COCO fixture image used by the slow
    integration test below.

    Fixes two defects in the original: the opened image was bound to one
    name while an undefined name ``image`` was returned (NameError), and
    the return annotation claimed ``int`` although a PIL image is returned.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
# Slow integration test: runs the published LayoutLMv3 base checkpoint on
# a fixture image and compares a slice of the hidden states against
# recorded reference values.
# NOTE(review): local names were obfuscated to ``UpperCamelCase``; names
# referenced below (``image_processor``, ``image``, ``model``,
# ``input_ids``, ``bbox``, ``pixel_values``, ``outputs``,
# ``expected_shape``, ``expected_slice``) have no visible definition under
# those names.  Verify against the upstream test file.
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    @cached_property
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Image processor fixture (OCR disabled), or None without vision deps."""
        return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Forward the base checkpoint and check shape + a 3x3 logits slice."""
        UpperCamelCase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(lowerCamelCase_ )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).pixel_values.to(lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[1, 2]] )
        UpperCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        UpperCamelCase = model(
            input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
        # verify the logits
        UpperCamelCase = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazily-loaded public API for the ConvNext model family.  The dict maps
# submodule name -> exported names; heavy submodules (torch / tf / vision)
# are only imported on first attribute access via _LazyModule.
# NOTE(review): obfuscation rebinds the single name ``_SCREAMING_SNAKE_CASE``
# on every assignment — upstream these are extensions of an
# ``_import_structure`` dict (also referenced by name at the bottom but
# never defined here).  Verify against the upstream ``__init__.py``.
_SCREAMING_SNAKE_CASE = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
# Vision-only exports (feature extractor / image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""]
    _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""]
# PyTorch model exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]
# TensorFlow model exports.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]
# Under static type checking, import everything eagerly so type checkers
# can resolve the names; at runtime, install the lazy module instead.
if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_ ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration class for the BiT (Big Transfer) model / backbone.

    Restores what the obfuscated original broke:

    - the three class attributes had been collapsed into one name-mangled
      identifier, leaving ``self.layer_types`` and ``self.supported_padding``
      (both read in ``__init__``) undefined;
    - every ``__init__`` parameter shared one name (a SyntaxError), and the
      body assigned to a throwaway local instead of ``self``;
    - mutable list defaults are replaced with ``None`` sentinels.

    Parameter names and defaults are reconstructed from the names the body
    itself references, matching the upstream ``BitConfig``.
    """

    # Hugging Face model-type identifier used by AutoConfig.
    model_type = """bit"""
    # Closed set of residual-layer variants accepted for ``layer_type``.
    layer_types = ["""preactivation""", """bottleneck"""]
    # Closed set of accepted (upper-cased) ``global_padding`` strategies.
    supported_padding = ["""SAME""", """VALID"""]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=None,
        depths=None,
        layer_type="""preactivation""",
        hidden_act="""relu""",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Validate the choices and store all configuration attributes.

        Raises:
            ValueError: if ``layer_type`` or ``global_padding`` is not one
                of the supported values.
        """
        super().__init__(**kwargs )
        # None sentinels instead of shared mutable list defaults.
        if hidden_sizes is None:
            hidden_sizes = [256, 512, 1024, 2048]
        if depths is None:
            depths = [3, 4, 6, 3]
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # One backbone stage per configured depth, preceded by the stem.
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        # NOTE(review): attribute names ``_out_features``/``_out_indices``
        # follow the BackboneConfigMixin convention — confirm against the
        # mixin's expectations.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = ShapEPipeline
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase = False
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_prior
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = self.dummy_tokenizer
UpperCamelCase = self.dummy_renderer
UpperCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
UpperCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase = output.images[0]
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch_device == """cpu"""
UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )
    def lowerCamelCase_ ( self : Dict ):
        """Check that ``num_images_per_prompt`` multiplies the batch dimension.

        NOTE(review): assignment targets are mangled in this copy — the
        values bound below are presumably components / pipe / batch_size(1) /
        num_images_per_prompt(2) / inputs / images; confirm against the
        original source.
        """
        UpperCamelCase = self.get_dummy_components()
        UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        UpperCamelCase = 1
        UpperCamelCase = 2
        UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
        # Replicate every batchable input `batch_size` times.
        for key in inputs.keys():
            if key in self.batch_params:
                UpperCamelCase = batch_size * [inputs[key]]
        UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow GPU integration test against the released ``openai/shap-e`` weights.

    NOTE(review): bare `UpperCamelCase = ...` assignments throughout appear to
    have lost their targets (expected_image, pipe, generator, images).
    """

    def lowerCamelCase_ ( self : Tuple ):
        """Free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self : Any ):
        """Compare generated frames against a reference rendering from the Hub."""
        UpperCamelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_np_out.npy""" )
        UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        # Fixed seed so the output is comparable with the stored reference.
        UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        UpperCamelCase = pipe(
            """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        # 20 frames of 64x64 RGB.
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Decoding granularities an MGP-STR head can emit.

    NOTE(review): member names appear mangled in this copy — all three
    assignments target the same name, and the tuple below reads
    ``DecodeType.CHARACTER/BPE/WORDPIECE`` from a class that no longer
    defines them; confirm against the original ``DecodeType(ExplicitEnum)``.
    """

    # character-level branch
    __lowerCAmelCase = """char"""
    # byte-pair-encoding (GPT-2) branch
    __lowerCAmelCase = """bpe"""
    # wordpiece (BERT) branch
    __lowerCAmelCase = """wp"""


# All supported decode types, in (char, bpe, wp) order.
_SCREAMING_SNAKE_CASE = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Processor bundling a ViT image processor with three tokenizers
    (char / GPT-2 BPE / BERT wordpiece) for MGP-STR scene-text recognition.

    NOTE(review): throughout this copy, bare `UpperCamelCase = ...` lines look
    like mangled attribute/local assignments (e.g. ``self.char_tokenizer``,
    ``self.bpe_tokenizer``, ``self.wp_tokenizer``, ``inputs``, ``encodings``),
    and several signatures reuse the parameter name ``lowerCamelCase_`` —
    confirm all of these against the original MgpstrProcessor source.
    """

    # Attributes / classes ProcessorMixin wires up.
    __lowerCAmelCase = ["""image_processor""", """char_tokenizer"""]
    __lowerCAmelCase = """ViTImageProcessor"""
    __lowerCAmelCase = """MgpstrTokenizer"""

    def __init__( self : List[Any] , lowerCamelCase_ : str=None , lowerCamelCase_ : str=None , **lowerCamelCase_ : str ):
        """Accept (image_processor, tokenizer); supports the deprecated
        ``feature_extractor`` kwarg as a fallback for the image processor."""
        UpperCamelCase = None
        if "feature_extractor" in kwargs:
            # Deprecated alias kept for backward compatibility until v5.
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , lowerCamelCase_ , )
            UpperCamelCase = kwargs.pop("""feature_extractor""" )
        UpperCamelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        # The char tokenizer is the one passed in; BPE and wordpiece
        # tokenizers are loaded from stock gpt2 / bert-base-uncased.
        UpperCamelCase = tokenizer
        UpperCamelCase = AutoTokenizer.from_pretrained("""gpt2""" )
        UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
        super().__init__(lowerCamelCase_ , lowerCamelCase_ )

    def __call__( self : Union[str, Any] , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[Any]=None , **lowerCamelCase_ : Any ):
        """Process images and/or text; when both are given, the text token ids
        are attached to the image features under ``labels``."""
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            UpperCamelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
        if text is not None:
            UpperCamelCase = self.char_tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            UpperCamelCase = encodings["""input_ids"""]
            return inputs

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple ):
        """Decode (char, bpe, wp) logits and keep, per sample, the branch with
        the highest confidence score.

        Returns a dict with ``generated_text``/``scores`` plus the raw
        per-branch strings — presumably; targets are mangled in this copy.
        """
        UpperCamelCase , UpperCamelCase , UpperCamelCase = sequences
        UpperCamelCase = char_preds.size(0 )
        UpperCamelCase , UpperCamelCase = self._decode_helper(lowerCamelCase_ , """char""" )
        UpperCamelCase , UpperCamelCase = self._decode_helper(lowerCamelCase_ , """bpe""" )
        UpperCamelCase , UpperCamelCase = self._decode_helper(lowerCamelCase_ , """wp""" )
        UpperCamelCase = []
        UpperCamelCase = []
        for i in range(lowerCamelCase_ ):
            # Pick the branch whose decode confidence is highest for sample i.
            UpperCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
            UpperCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
            UpperCamelCase = scores.index(max(lowerCamelCase_ ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        UpperCamelCase = {}
        UpperCamelCase = final_strs
        UpperCamelCase = final_scores
        UpperCamelCase = char_strs
        UpperCamelCase = bpe_strs
        UpperCamelCase = wp_strs
        return out

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
        """Greedy-decode one branch's logits.

        Each branch has its own decoder, EOS token id and EOS string; the
        confidence score is the cumulative product of per-step max
        probabilities up to (and including) the EOS position.
        """
        if format == DecodeType.CHARACTER:
            UpperCamelCase = self.char_decode
            UpperCamelCase = 1
            UpperCamelCase = """[s]"""
        elif format == DecodeType.BPE:
            UpperCamelCase = self.bpe_decode
            UpperCamelCase = 2
            UpperCamelCase = """#"""
        elif format == DecodeType.WORDPIECE:
            UpperCamelCase = self.wp_decode
            UpperCamelCase = 102
            UpperCamelCase = """[SEP]"""
        else:
            raise ValueError(f"""Format {format} is not supported.""" )
        UpperCamelCase , UpperCamelCase = [], []
        UpperCamelCase = pred_logits.size(0 )
        UpperCamelCase = pred_logits.size(1 )
        # Greedy argmax per step; position 0 is dropped (leading special token).
        UpperCamelCase , UpperCamelCase = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase_ , sorted=lowerCamelCase_ )
        UpperCamelCase = preds_index.view(-1 , lowerCamelCase_ )[:, 1:]
        UpperCamelCase = decoder(lowerCamelCase_ )
        UpperCamelCase , UpperCamelCase = torch.nn.functional.softmax(lowerCamelCase_ , dim=2 ).max(dim=2 )
        UpperCamelCase = preds_max_prob[:, 1:]
        for index in range(lowerCamelCase_ ):
            # Truncate the decoded string at the branch's EOS marker.
            UpperCamelCase = preds_str[index].find(lowerCamelCase_ )
            UpperCamelCase = preds_str[index][:pred_eos]
            UpperCamelCase = preds_index[index].cpu().tolist()
            UpperCamelCase = pred_index.index(lowerCamelCase_ ) if eos_token in pred_index else -1
            UpperCamelCase = preds_max_prob[index][: pred_eos_index + 1]
            UpperCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(lowerCamelCase_ )
            conf_scores.append(lowerCamelCase_ )
        return dec_strs, conf_scores

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str ):
        """Char-branch decode: batch-decode ids and strip all spaces."""
        UpperCamelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase_ )]
        return decode_strs

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any] ):
        """BPE-branch decode: plain GPT-2 batch decode."""
        return self.bpe_tokenizer.batch_decode(lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[Any] ):
        """Wordpiece-branch decode: batch-decode ids and strip all spaces."""
        UpperCamelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase_ )]
        return decode_strs
from __future__ import annotations
def lowercase( input_list , low , mid , high ) -> list:
    """Merge the two sorted runs ``input_list[low:mid]`` and
    ``input_list[mid:high + 1]`` in place.

    The merged run is written back into ``input_list[low:high + 1]`` and the
    (mutated) list is returned for chaining.

    NOTE: the original assigned the merged result to a dead local instead of
    the slice (so the merge was silently discarded) and reused one parameter
    name four times, which is a SyntaxError; both are fixed here.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    # Repeatedly take the smaller head element; `<=` keeps the merge stable.
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    # Write the merged run plus any leftover tail back into the list.
    input_list[low : high + 1] = result + left + right
    return input_list
def _merge_runs( items , low , mid , high ) -> list:
    """Merge sorted runs ``items[low:mid]`` and ``items[mid:high + 1]`` in place."""
    merged = []
    left, right = items[low:mid], items[mid : high + 1]
    while left and right:
        merged.append((left if left[0] <= right[0] else right).pop(0 ) )
    items[low : high + 1] = merged + left + right
    return items


def lowercase( UpperCamelCase_ ) -> list:
    """Sort a sequence with an iterative (bottom-up) merge sort.

    Args:
        UpperCamelCase_: the sequence to sort; it is copied, not mutated.

    Returns:
        A sorted list (the input itself when it has 0 or 1 elements).

    NOTE: the original called the undefined name ``merge`` and lost its
    local-variable targets; the self-contained ``_merge_runs`` helper above
    restores working behavior.
    """
    if len(UpperCamelCase_ ) <= 1:
        return UpperCamelCase_
    items = list(UpperCamelCase_ )

    # Merge adjacent runs of doubling width p = 2, 4, 8, ...
    p = 2
    while p <= len(items ):
        # Merge each adjacent pair of width-(p // 2) runs.
        for i in range(0 , len(items ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            items = _merge_runs(items , low , mid , high )
        # Final merge of the last two (possibly unequal) parts.
        if p * 2 >= len(items ):
            mid = i
            items = _merge_runs(items , 0 , mid , len(items ) - 1 )
            break
        p *= 2
    return items
if __name__ == "__main__":
    # Read a comma-separated list of integers; an empty line sorts [].
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    # `lowercase` is the iterative merge sort defined above; the original
    # referenced the undefined names `user_input`/`unsorted`/`iter_merge_sort`.
    print(lowercase(unsorted))
| 343 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase( UpperCamelCase_ ) -> int:
    """Count the trainable parameters of a model.

    Args:
        UpperCamelCase_: any object exposing ``parameters()`` that yields
            tensors with ``requires_grad`` and ``size()`` (e.g. an
            ``nn.Module``).

    Returns:
        Total number of elements across all parameters with ``requires_grad``.

    NOTE: the original body referenced unbound names (``p``, ``model``,
    ``model_parameters``, ``params``) and raised NameError; restored here.
    """
    model_parameters = filter(lambda p : p.requires_grad , UpperCamelCase_.parameters() )
    return int(sum(np.prod(p.size() ) for p in model_parameters ) )
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def lowercase( output_dir , metric ) -> Optional[Any]:
    """Build a ModelCheckpoint callback that keeps the best checkpoints.

    Args:
        output_dir: directory the checkpoints are written to.
        metric: validation metric to monitor; one of ``rouge2``, ``bleu``
            or ``em``.

    Returns:
        A ``ModelCheckpoint`` monitoring ``val_<metric>`` (max mode, top 3).

    Raises:
        NotImplementedError: for any other metric name.

    NOTE: the original signature reused one parameter name twice (a
    SyntaxError) and dropped the targets of the filename-template
    assignments; both are restored here.
    """
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            """ function.""" )
    # Keep the three best checkpoints by the monitored validation metric.
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode="""max""" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def lowercase( metric , patience ) -> str:
    """Build an EarlyStopping callback watching ``val_<metric>``.

    Loss-like metrics are minimized; every other metric is maximized.

    NOTE: the original signature reused one parameter name twice (a
    SyntaxError) and passed an unbound name for ``verbose``; fixed here.
    """
    return EarlyStopping(
        monitor=f"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=patience , verbose=True , )
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
    """Lightning callback that logs learning rates, parameter counts and
    writes per-epoch seq2seq results/generations to the output directory.

    NOTE(review): bare `UpperCamelCase = ...` assignments throughout this copy
    appear to have lost their targets (lrs, metrics, od, results_file,
    generations_file, val, msg, npars, n_trainable_pars, ...), and several
    signatures reuse the parameter name ``lowerCamelCase_`` — confirm against
    the original callbacks source.
    """

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : int ):
        """Log one ``lr_group_<i>`` metric per optimizer param group."""
        UpperCamelCase = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lowerCamelCase_ )

    @rank_zero_only
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : pl.Trainer , lowerCamelCase_ : pl.LightningModule , lowerCamelCase_ : str , lowerCamelCase_ : List[str]=True ):
        """Append scalar callback metrics to ``<type_path>_results`` and,
        optionally, dump the generated texts; rank-zero only."""
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        UpperCamelCase = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
        # Log results
        UpperCamelCase = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            UpperCamelCase = od / """test_results.txt"""
            UpperCamelCase = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            UpperCamelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            UpperCamelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=lowerCamelCase_ )
        generations_file.parent.mkdir(exist_ok=lowerCamelCase_ )
        # Append (not overwrite) so successive epochs accumulate in one file.
        with open(lowerCamelCase_ , """a+""" ) as writer:
            for key in sorted(lowerCamelCase_ ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                UpperCamelCase = metrics[key]
                if isinstance(lowerCamelCase_ , torch.Tensor ):
                    UpperCamelCase = val.item()
                UpperCamelCase = f"""{key}: {val:.6f}\n"""
                writer.write(lowerCamelCase_ )
        if not save_generations:
            return
        if "preds" in metrics:
            UpperCamelCase = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(lowerCamelCase_ )

    @rank_zero_only
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
        """Log total and trainable parameter counts (in millions)."""
        try:
            UpperCamelCase = pl_module.model.model.num_parameters()
        except AttributeError:
            UpperCamelCase = pl_module.model.num_parameters()
        UpperCamelCase = count_trainable_parameters(lowerCamelCase_ )
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )

    @rank_zero_only
    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : pl.Trainer , lowerCamelCase_ : pl.LightningModule ):
        """On test end: persist metrics JSON and write the test logs."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(lowerCamelCase_ , lowerCamelCase_ , """test""" )

    @rank_zero_only
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : pl.Trainer , lowerCamelCase_ : Optional[int] ):
        """On validation end: persist metrics JSON (generations not saved)."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE_ :
    """Test helper that builds small Bit configs/inputs and runs shape checks.

    NOTE(review): `UpperCamelCase = ...` assignments in this copy appear to
    have lost their ``self.<attr>`` / local targets, and the long ``__init__``
    signature reuses the name ``lowerCamelCase_`` for every parameter —
    confirm against the original BitModelTester.
    """

    def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ):
        """Store the (tiny) model hyper-parameters used by every check."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = num_channels
        UpperCamelCase = embeddings_size
        UpperCamelCase = hidden_sizes
        UpperCamelCase = depths
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_act
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        UpperCamelCase = len(lowerCamelCase_ )
        UpperCamelCase = out_features
        UpperCamelCase = out_indices
        UpperCamelCase = num_groups

    def lowerCamelCase_ ( self : Any ):
        """Build (config, pixel_values, labels) with random tensors."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Optional[int] ):
        """Build a BitConfig from the stored hyper-parameters."""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ):
        """Forward a BitModel and check the last hidden state shape."""
        UpperCamelCase = BitModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # Spatial size is downsampled by a factor of 32 overall.
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
        """Forward BitForImageClassification and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = BitForImageClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ):
        """Check BitBackbone feature maps/channels, with and without
        ``out_features`` (None falls back to the last stage only)."""
        UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        UpperCamelCase = None
        UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def lowerCamelCase_ ( self : List[str] ):
        """Build (config, inputs_dict) for the common model tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common ModelTester/Pipeline mixin suite for the Bit models.

    NOTE(review): in this copy the mixin class attributes have lost their
    names (presumably all_model_classes, pipeline_model_mapping, and the
    fx/torchscript/resize/head-masking/attention flags), and locals such as
    ``model``/``signature``/``config`` are never bound — confirm against the
    original BitModelTest.
    """

    __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    __lowerCAmelCase = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Create the model tester and config tester fixtures."""
        UpperCamelCase = BitModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Run the full battery of BitConfig round-trip/sanity checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Intentionally empty: Bit has no extra common config properties."""
        return

    @unittest.skip(reason="""Bit does not output attentions""" )
    def lowerCamelCase_ ( self : int ):
        """Skipped: Bit is a convnet, there are no attention maps."""
        pass

    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Skipped: vision model, no input embeddings."""
        pass

    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Skipped: vision model, no token embeddings to get/set."""
        pass

    def lowerCamelCase_ ( self : Tuple ):
        """Check the forward signature starts with ``pixel_values``."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Exercise the base model shape check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] ):
        """Exercise the backbone feature-map checks."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Check norm layers initialize to weight=1 / bias=0."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(config=lowerCamelCase_ )
            for name, module in model.named_modules():
                if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def lowerCamelCase_ ( self : int ):
        """Check hidden-state counts/shapes for both layer types, requested
        via kwargs and via config."""
        def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ):
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCamelCase = layer_type
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Skipped: no feedforward chunking in convnets."""
        pass

    def lowerCamelCase_ ( self : str ):
        """Exercise the image-classification head shape check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : int ):
        """Smoke-load the first published checkpoint from the Hub."""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> Any:
    """Load the stock COCO cats-on-a-couch fixture image used by the
    integration tests.

    NOTE: the original bound the opened image to a lost target and then
    returned the undefined name ``image``; the binding is restored here.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test pinning the logits of the released Bit checkpoint.

    NOTE(review): bare `UpperCamelCase = ...` assignments appear to have lost
    their targets (model, image_processor, image, inputs, outputs,
    expected_shape, expected_slice) — confirm against the original source.
    """

    @cached_property
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Image processor matching the first published checkpoint (or None
        when vision deps are unavailable)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Run the fixture image through the pretrained classifier and check
        shape plus the first three logits."""
        UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(**lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Backbone-mixin test suite for BitBackbone.

    NOTE(review): the three class attributes and the fixture assignment have
    lost their names in this copy (presumably all_model_classes,
    config_class, has_attentions, model_tester) — confirm against the
    original source.
    """

    __lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
    __lowerCAmelCase = BitConfig
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Create the shared BitModelTester fixture."""
        UpperCamelCase = BitModelTester(self )
| 343 | 1 |
from jiwer import compute_measures
import datasets
_SCREAMING_SNAKE_CASE = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_SCREAMING_SNAKE_CASE = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_SCREAMING_SNAKE_CASE = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    """Word Error Rate metric backed by ``jiwer.compute_measures``."""

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Describe the metric: string predictions vs. string references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ] , )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Optional[int]=False ):
        """Compute WER = (S + D + I) / (S + D + C).

        With ``concatenate_texts`` the corpus is scored in a single jiwer
        call; otherwise error counts are accumulated pair by pair, which
        weights every reference word equally across the corpus.

        NOTE(review): the accumulator assignments below have lost their
        targets in this copy (``incorrect = 0`` / ``total = 0`` /
        ``measures = ...``) — confirm against the original metric source.
        """
        if concatenate_texts:
            return compute_measures(lowerCamelCase_ , lowerCamelCase_ )["wer"]
        else:
            UpperCamelCase = 0
            UpperCamelCase = 0
            for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_ ):
                UpperCamelCase = compute_measures(lowerCamelCase_ , lowerCamelCase_ )
                # Numerator: S + D + I; denominator: S + D + C (= N words).
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ :
    """Test helper that builds small TF ResNet configs/inputs and runs shape
    checks.

    NOTE(review): `UpperCamelCase = ...` assignments in this copy appear to
    have lost their ``self.<attr>`` / local targets, and the ``__init__``
    signature reuses the name ``lowerCamelCase_`` for every parameter —
    confirm against the original TFResNetModelTester.
    """

    def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ):
        """Store the (tiny) model hyper-parameters used by every check."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = num_channels
        UpperCamelCase = embeddings_size
        UpperCamelCase = hidden_sizes
        UpperCamelCase = depths
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_act
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        UpperCamelCase = len(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        """Build (config, pixel_values, labels) with random tensors."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Optional[int] ):
        """Build a ResNetConfig from the stored hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
        """Forward a TFResNetModel and check the last hidden state shape."""
        UpperCamelCase = TFResNetModel(config=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
        """Forward TFResNetForImageClassification and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self : int ):
        """Build (config, inputs_dict) for the common model tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common tests for TFResNet (no attention outputs, no input embeddings).

    NOTE(review): in the scrambled original every method was named
    `lowerCamelCase_` (each definition shadowed the previous one, so unittest
    discovered nothing) and every class attribute was `__lowerCAmelCase`.
    Names below were restored from the mixin contract and in-file call sites;
    confirm TFModelTesterMixin / PipelineTesterMixin / ResNetConfig are
    imported at the top of the file.
    """

    # Class attributes consumed by the common test mixins.
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # The individual checks in test_config cover the config surface.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the fixture COCO image used by the slow integration test.

    Renamed from the scrambled `lowercase`: the integration test calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """Slow integration test: run a pretrained TFResNet classifier on the COCO image.

    NOTE(review): the property and test-method names were restored from call
    sites (`self.default_image_processor`) and the unittest `test_` discovery
    convention; the scrambled original named both `lowerCamelCase_`.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 343 | 1 |
def lowercase(number) -> bool:
    """
    Return True if *number* is an automorphic number, i.e. its square ends in
    the number itself (5 -> 25, 76 -> 5776).

    Raises:
        TypeError: if *number* is not an int.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square, digit by digit.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when this file is executed directly.
    import doctest
    doctest.testmod()
| 343 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion function below reads it as `logger`
# (the scrambled original bound it to `_SCREAMING_SNAKE_CASE`, a NameError at use).
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# The scrambled original bound the list to `_SCREAMING_SNAKE_CASE` while appending to
# `rename_keys` (NameError); it is now built under the name the conversion code reads.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.extend(
        [
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight"),
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"),
            (f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"),
            (f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"),
            (f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"),
            (f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"),
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"),
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"),
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"),
            (f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"),
        ]
    )
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.extend(
        [
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight"),
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"),
            (f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight"),
            (f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias"),
            (f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"),
            (f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"),
            (f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"),
            (f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"),
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"),
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"),
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"),
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"),
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"),
            (f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"),
        ]
    )
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.extend(
        [
            (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight"),
            (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight"),
            (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight"),
            (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight"),
            (f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"),
            (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight"),
            # (f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"),
            (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight"),
            (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight"),
            (f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"),
            (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight"),
            (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias"),
            (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias"),
            (f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"),
            (f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"),
            (f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"),
            (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias"),
            # (f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"),
            (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias"),
            (f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"),
            (f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"),
            (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias"),
        ]
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the value stored under *old* in *state_dict* to the key *new* (in place).

    Renamed from the scrambled `lowercase` (the conversion loop calls `rename_key`);
    the original also declared three identically named parameters, a SyntaxError.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with `backbone.0.body` keys renamed to
    `backbone.conv_encoder.model`, all other entries copied unchanged.

    Reconstructed from the intact references (`state_dict.items()`,
    `return new_state_dict`): the scrambled version assigned every result to a
    throwaway local, so the output dict was never populated.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in_proj weight/bias into separate
    q/k/v projections (256 rows each), mutating *state_dict* in place.

    Renamed from the scrambled `lowercase` (called as `read_in_q_k_v`); the
    duplicate parameter names (SyntaxError) and the dropped target keys were
    restored. Target key names follow the HF ConditionalDETR convention
    `encoder.layers.{i}.self_attn.{q,k,v}_proj` -- NOTE(review): verify against
    the model's actual parameter names.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download the standard COCO validation image used to sanity-check conversions.

    Renamed from the scrambled `lowercase` (the conversion function calls
    `prepare_img()`); the scrambled `stream=<url>` argument is restored to
    `stream=True` so the raw response can be handed to PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original Conditional DETR hub checkpoint *model_name*
    into the HF ConditionalDetr structure, verify the outputs match, push the
    model and save it to *pytorch_dump_folder_path*.

    NOTE(review): reconstructed from a scrambled original (duplicate parameter
    names, assignments collapsed to a single local). The panoptic key splice
    below should be verified against an actual checkpoint's key layout.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    # attach the COCO detection label maps to the config
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format_ = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format_)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): splice reconstructed -- confirm the resulting key layout
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The scrambled original assigned the parser and the parsed
    # args to `_SCREAMING_SNAKE_CASE` but read them as `parser` / `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pandas (pickled DataFrame) packaged builder.

    Renamed from the scrambled `SCREAMING_SNAKE_CASE_`: the builder below
    references `PandasConfig`, and its methods read `self.config.features`.
    """

    # Optional schema; when set, loaded Arrow tables are cast to it.
    features: Optional[datasets.Features] = None
class SCREAMING_SNAKE_CASE_(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads pickled pandas DataFrames.

    NOTE(review): the scrambled original named every hook `lowerCamelCase_`
    (so the `datasets` framework never found `_info`/`_split_generators`/
    `_generate_tables`) and the config-class attribute `__lowerCAmelCase`;
    the framework names are restored below.
    """

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict `data_files` layouts."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        # Each file is expected to be a pickled pandas DataFrame.
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
| 343 | from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    """Builds a tiny ViTMAE config plus random inputs for the unit tests.

    Renamed from the scrambled `SCREAMING_SNAKE_CASE_`: the test class
    instantiates `TFViTMAEModelTester(self)`. The `__init__` signature had
    ~18 identically named parameters (a SyntaxError); names below follow the
    body's attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common tests for TFViTMAE.

    NOTE(review): the scrambled bases (`__lowerCAmelCase`) are restored to the
    mixins imported at the top of the file, and the five mangled class
    attributes to the names the mixins read.
    """

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = TFViTMAEModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = outputs_dict[0].numpy()
UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ):
UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase_ ):
UpperCamelCase = v.numpy()
else:
UpperCamelCase = np.array(lowerCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.constant(lowerCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """Check that every keras-serializable ``...MainLayer`` can be wrapped in a
        ``tf.keras.Model``, saved to HDF5 and reloaded with identical outputs.

        NOTE(review): obfuscation broke the locals (``config``, ``inputs_dict``, ``noise``,
        ``tf_main_layer_classes``, ``symbolic_inputs``...); this cannot run as written.
        """
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        # Collect the MainLayer class matching each model class in the same module.
        UpperCamelCase = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(lowerCamelCase_ )
            if module_member_name.endswith("""MainLayer""" )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
            for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),)
            if isinstance(lowerCamelCase_ , lowerCamelCase_ )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ )
        }
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ )
        inputs_dict.update({"""noise""": noise} )
        for main_layer_class in tf_main_layer_classes:
            UpperCamelCase = main_layer_class(lowerCamelCase_ )
            # Symbolic keras inputs mirroring the concrete batch (drop the batch dimension).
            UpperCamelCase = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) )
            UpperCamelCase = model(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" )
                model.save(lowerCamelCase_ )
                UpperCamelCase = tf.keras.models.load_model(
                    lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(lowerCamelCase_ , tf.keras.Model )
                UpperCamelCase = model(lowerCamelCase_ )
                self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
    @slow
    def lowerCamelCase_ ( self : Dict ):
        """Round-trip ``save_pretrained``/``from_pretrained`` and check outputs agree
        within 1e-5 (with a fixed ``noise`` input, since ViTMAE masks randomly).

        NOTE(review): reads of ``config``, ``inputs_dict``, ``outputs``, ``out_a`` etc. are
        unbound after the obfuscation; also the final compare is ``out_a - out_a`` (same name
        twice), which would always be 0 even with locals restored — verify intent.
        """
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
            if model_class.__name__ == "TFViTMAEModel":
                UpperCamelCase = outputs.last_hidden_state.numpy()
                UpperCamelCase = 0
            else:
                UpperCamelCase = outputs.logits.numpy()
                UpperCamelCase = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
                UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ )
                UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
                if model_class.__name__ == "TFViTMAEModel":
                    UpperCamelCase = after_outputs["""last_hidden_state"""].numpy()
                    UpperCamelCase = 0
                else:
                    UpperCamelCase = after_outputs["""logits"""].numpy()
                    UpperCamelCase = 0
                UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(lowerCamelCase_ , 1E-5 )
    def lowerCamelCase_ ( self : List[str] ):
        """Check ``get_config``/``from_config`` round-trips: the config must be
        JSON-serializable and a rebuilt model with copied weights must match outputs.

        NOTE(review): ``config``, ``inputs_dict``, ``model_config``, ``new_model`` reads are
        unbound after obfuscation — the test cannot run as written.
        """
        np.random.seed(2 )
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
            UpperCamelCase = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(lowerCamelCase_ )
            UpperCamelCase = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            UpperCamelCase = model_class.from_config(model.config )
            UpperCamelCase = new_model(lowerCamelCase_ )  # Build model
            new_model.set_weights(model.get_weights() )
            UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ )
            self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self : int ):
        """Intentionally skipped: output determinism cannot be asserted (random masking)."""
        pass
    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Intentionally skipped: output determinism cannot be asserted (random masking)."""
        pass
    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Smoke-test loading a pretrained checkpoint from the Hub (network, hence @slow)."""
        UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> "Image.Image":
    """Load the standard COCO fixture image used by the integration tests.

    Fix: the original assigned the opened image to the obfuscated name ``UpperCamelCase``
    but returned ``image`` — an unconditional NameError. The ``-> int`` annotation was
    also wrong; the function returns a PIL image.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Integration test: run the pretrained facebook/vit-mae-base checkpoint on a fixture
    image and compare logits against golden values.

    NOTE(review): the obfuscation collapsed both method names to ``lowerCamelCase_`` (the
    second definition shadows the first) and broke the locals (``model``, ``inputs``,
    ``outputs``, ``vit_mae_config``...); ``self.default_image_processor`` no longer exists.
    """
    @cached_property
    def lowerCamelCase_ ( self : Dict ):
        """Lazily build the image processor (None when vision deps are unavailable)."""
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self : List[str] ):
        """Forward the fixture image with a fixed noise vector and check logit shape/values."""
        np.random.seed(2 )
        UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        UpperCamelCase = ViTMAEConfig()
        UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        UpperCamelCase = np.random.uniform(size=(1, num_patches) )
        # forward pass
        UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 343 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowercase( UpperCamelCase_ ) -> str:
    """Format a number of seconds as ``h:mm:ss``, or ``mm:ss`` when under an hour.

    Fixes: the original bound the truncated value to the obfuscated name ``UpperCamelCase``
    but then read ``t``/``h``/``m``/``s`` (NameError), and its ``-> Tuple`` annotation both
    lied about the return type and referenced a name this module never imports.
    """
    t = int(UpperCamelCase_ )  # truncate fractional seconds
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def lowercase( value , total , prefix , label , width=300 ) -> str:
    """Return the HTML for a progress bar showing ``value``/``total``.

    Fixes: the original declared five parameters all named ``UpperCamelCase_`` (a
    SyntaxError: duplicate argument) while the body read ``value``/``total``/``prefix``/
    ``label``/``width``; its ``-> Union[str, Any]`` annotation referenced ``Union``,
    which this module never imports. Parameter order matches the existing call sites
    (value, total, prefix, label, width).
    """
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def lowercase( UpperCamelCase_ ) -> str:
    """Render ``items`` (first row = header, remaining rows = data) as an HTML table.

    Floats are formatted with six decimals; everything else via ``str``.
    Fixes: the original assigned the accumulator to the obfuscated name ``UpperCamelCase``
    but appended to ``html_code`` (NameError), and its float check had degenerated into
    ``isinstance(UpperCamelCase_, UpperCamelCase_)``.
    """
    items = UpperCamelCase_
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"""      <th>{i}</th>\n"""
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt , float ) else str(elt )
            html_code += f"""      <td>{elt}</td>\n"""
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class SCREAMING_SNAKE_CASE_ :
    """A time-aware progress bar for notebooks, throttled so IPython display updates
    happen at most every ``update_every`` seconds after a ``warmup`` of forced updates.

    NOTE(review): the obfuscation broke this class: both class attributes share the name
    ``__lowerCAmelCase`` (the second clobbers the first), ``__init__`` declares five
    duplicate ``lowerCamelCase_`` parameters (SyntaxError), every method is named
    ``lowerCamelCase_`` (later defs shadow earlier ones), and locals/attributes written
    as ``UpperCamelCase`` are later read under their original names (``self.total``,
    ``self.last_value``, ``spaced_value``...). It cannot run as written.
    """
    # originally: warmup = 5 (number of initial updates that always render)
    __lowerCAmelCase = 5
    # originally: update_every = 0.2 (minimum seconds between renders after warmup)
    __lowerCAmelCase = 0.2
    def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional["NotebookTrainingTracker"] = None , lowerCamelCase_ : int = 300 , ):
        """Store total/prefix/leave/parent/width and reset the timing state."""
        UpperCamelCase = total
        UpperCamelCase = """""" if prefix is None else prefix
        UpperCamelCase = leave
        UpperCamelCase = parent
        UpperCamelCase = width
        UpperCamelCase = None
        UpperCamelCase = None
        UpperCamelCase = None
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : bool = False , lowerCamelCase_ : str = None ):
        """Advance the bar to ``value``; re-render only on warmup, force, or throttle expiry."""
        UpperCamelCase = value
        if comment is not None:
            UpperCamelCase = comment
        if self.last_value is None:
            # First call: initialise timing state and always draw.
            UpperCamelCase = UpperCamelCase = time.time()
            UpperCamelCase = UpperCamelCase = value
            UpperCamelCase = UpperCamelCase = None
            UpperCamelCase = self.warmup
            UpperCamelCase = 1
            self.update_bar(lowerCamelCase_ )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            UpperCamelCase = time.time()
            UpperCamelCase = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                UpperCamelCase = self.elapsed_time / (value - self.start_value)
            else:
                UpperCamelCase = None
            if value >= self.total:
                UpperCamelCase = self.total
                UpperCamelCase = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                UpperCamelCase = self.average_time_per_item * (self.total - value)
            self.update_bar(lowerCamelCase_ )
            UpperCamelCase = value
            UpperCamelCase = current_time
            # Recompute how many items to wait before the next render.
            if self.average_time_per_item is None:
                UpperCamelCase = 1
            else:
                UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=None ):
        """Rebuild the textual label (elapsed / remaining / it/s / comment) and redraw."""
        UpperCamelCase = """ """ * (len(str(self.total ) ) - len(str(lowerCamelCase_ ) )) + str(lowerCamelCase_ )
        if self.elapsed_time is None:
            UpperCamelCase = f"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            UpperCamelCase = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
        else:
            UpperCamelCase = (
                f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
                f""" {format_time(self.predicted_remaining )}"""
            )
            self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
        self.display()
    def lowerCamelCase_ ( self : Any ):
        """Render the bar, delegating to the parent when this is a child bar."""
        UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def lowerCamelCase_ ( self : List[str] ):
        """Blank out the display of a top-level bar."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML("""""" ) )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """A progress bar augmented with a metrics table and an optional child bar
    (intended base: the NotebookProgressBar above).

    NOTE(review): broken by obfuscation — the base-class name ``__lowerCAmelCase`` is
    undefined at module level, all methods share the name ``lowerCamelCase_`` (later defs
    shadow earlier ones), and locals/attributes assigned as ``UpperCamelCase`` are later
    read under their intended names (``self.inner_table``, ``columns``, ``self.child_bar``).
    """
    def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str]=None ):
        """Initialise the underlying bar plus an empty metrics table / child slot."""
        super().__init__(lowerCamelCase_ )
        UpperCamelCase = None if column_names is None else [column_names]
        UpperCamelCase = None
    def lowerCamelCase_ ( self : Optional[int] ):
        """Render the bar, then append the metrics table and any child bar HTML."""
        UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] ):
        """Append one row of ``values`` to the table, extending columns on first write."""
        if self.inner_table is None:
            UpperCamelCase = [list(values.keys() ), list(values.values() )]
        else:
            UpperCamelCase = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(lowerCamelCase_ )
                UpperCamelCase = columns
            self.inner_table.append([values[c] for c in columns] )
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : str=300 ):
        """Create and return a child NotebookProgressBar rendered beneath this one."""
        UpperCamelCase = NotebookProgressBar(lowerCamelCase_ , prefix=lowerCamelCase_ , parent=self , width=lowerCamelCase_ )
        return self.child_bar
    def lowerCamelCase_ ( self : str ):
        """Detach the child bar and redraw without it."""
        UpperCamelCase = None
        self.display()
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """Trainer callback that drives the notebook progress/metrics display
    (intended base: ``TrainerCallback``; the hooks below correspond to on_train_begin,
    on_step_end, on_prediction_step, on_predict, on_log, on_evaluate, on_train_end).

    NOTE(review): broken by obfuscation — base name ``__lowerCAmelCase`` is undefined,
    every hook was renamed to ``lowerCamelCase_`` (so the Trainer would never call them
    and later defs shadow earlier ones), and locals/attributes assigned as
    ``UpperCamelCase`` are later read under their intended names (``args``, ``state``,
    ``self.training_tracker``, ``metrics``...).
    """
    def __init__( self : Optional[Any] ):
        """Reset tracker, prediction bar and the force-update flag."""
        UpperCamelCase = None
        UpperCamelCase = None
        UpperCamelCase = False
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , **lowerCamelCase_ : str ):
        """Train begin: choose the first column (Epoch/Step) and create the tracker."""
        UpperCamelCase = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
        UpperCamelCase = 0
        UpperCamelCase = 0
        UpperCamelCase = [self.first_column] + ["""Training Loss"""]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("""Validation Loss""" )
        UpperCamelCase = NotebookTrainingTracker(state.max_steps , lowerCamelCase_ )
    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ):
        """Step end: advance the tracker with an "Epoch x/y" comment."""
        UpperCamelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        UpperCamelCase = False
    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : List[str] ):
        """Prediction step: lazily create the prediction bar, then tick it."""
        if not has_length(lowerCamelCase_ ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                UpperCamelCase = self.training_tracker.add_child(len(lowerCamelCase_ ) )
            else:
                UpperCamelCase = NotebookProgressBar(len(lowerCamelCase_ ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , **lowerCamelCase_ : Any ):
        """Predict end: close and drop the prediction bar."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        UpperCamelCase = None
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[str] ):
        """Log: when never evaluating, write the training loss row directly."""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            UpperCamelCase = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step sine we're not in epoch eval strategy
            UpperCamelCase = state.global_step
            self.training_tracker.write_line(lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple=None , **lowerCamelCase_ : Union[str, Any] ):
        """Evaluate: assemble a metrics row (losses + prettified metric names), write it,
        detach the prediction bar, and force the next render."""
        if self.training_tracker is not None:
            UpperCamelCase = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    UpperCamelCase = log["""loss"""]
                    break
            if self.first_column == "Epoch":
                UpperCamelCase = int(state.epoch )
            else:
                UpperCamelCase = state.global_step
            UpperCamelCase = """eval"""
            for k in metrics:
                if k.endswith("""_loss""" ):
                    UpperCamelCase = re.sub(R"""\_loss$""" , """""" , lowerCamelCase_ )
            # Drop bookkeeping metrics that should not appear in the table.
            UpperCamelCase = metrics.pop("""total_flos""" , lowerCamelCase_ )
            UpperCamelCase = metrics.pop("""epoch""" , lowerCamelCase_ )
            UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_runtime""" , lowerCamelCase_ )
            UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , lowerCamelCase_ )
            UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , lowerCamelCase_ )
            UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , lowerCamelCase_ )
            for k, v in metrics.items():
                if k == f"""{metric_key_prefix}_loss""":
                    UpperCamelCase = v
                else:
                    UpperCamelCase = k.split("""_""" )
                    UpperCamelCase = """ """.join([part.capitalize() for part in splits[1:]] )
                    UpperCamelCase = v
            self.training_tracker.write_line(lowerCamelCase_ )
            self.training_tracker.remove_child()
            UpperCamelCase = None
            # Evaluation takes a long time so we should force the next update.
            UpperCamelCase = True
    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[str] ):
        """Train end: final forced tracker update, then drop the tracker."""
        self.training_tracker.update(
            state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=lowerCamelCase_ )
        UpperCamelCase = None
UpperCamelCase = None
def lowercase( neighbours , colored_vertices , color ) -> bool:
    """Return True when no already-coloured neighbour of a vertex uses ``color``.

    ``neighbours`` is one adjacency-matrix row (1 = edge); ``colored_vertices[i]`` is the
    colour of vertex ``i`` (-1 = uncoloured).
    Fix: the original declared three parameters all named ``UpperCamelCase_`` (SyntaxError:
    duplicate argument) while the body read the intended names restored here.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def lowercase( graph , max_colors , colored_vertices , index ) -> bool:
    """Backtracking step of graph colouring: try to colour ``graph[index:]`` in place.

    Returns True (with ``colored_vertices`` filled in) when a valid colouring using at most
    ``max_colors`` colours exists; otherwise restores -1 entries and returns False.
    Fixes: the original declared four duplicate ``UpperCamelCase_`` parameters (SyntaxError),
    read unbound names, and called ``valid_coloring``/``util_color`` which do not exist in
    this module (every function here is named ``lowercase``) — the validity check is now a
    local helper and the recursion calls this function by its actual name.
    """
    def _valid(neighbours , color ):
        # A colour is usable iff no adjacent, already-coloured vertex carries it.
        return not any(
            neighbour == 1 and colored_vertices[i] == color
            for i, neighbour in enumerate(neighbours ) )
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for color in range(max_colors ):
        if _valid(graph[index] , color ):
            # Color current vertex
            colored_vertices[index] = color
            # Validate coloring
            if lowercase(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def lowercase( graph , max_colors ) -> list[int]:
    """Colour ``graph`` (adjacency matrix) with at most ``max_colors`` colours.

    Returns one colour per vertex, or ``[]`` when no such colouring exists.
    Fixes: the original declared two duplicate ``UpperCamelCase_`` parameters (SyntaxError),
    read unbound names, and called ``util_color`` which does not exist in this module —
    the backtracking search is now a local helper closing over ``colored_vertices``.
    """
    colored_vertices = [-1] * len(graph )

    def _solve(index ):
        if index == len(graph ):
            return True
        for color in range(max_colors ):
            # Colour is usable iff no adjacent, already-coloured vertex carries it.
            if not any(
                neighbour == 1 and colored_vertices[i] == color
                for i, neighbour in enumerate(graph[index] ) ):
                colored_vertices[index] = color
                if _solve(index + 1 ):
                    return True
                colored_vertices[index] = -1  # backtrack
        return False

    if _solve(0 ):
        return colored_vertices
    return []
| 343 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase( UpperCamelCase_ ) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix as a list of float rows.

    Uses Decimal arithmetic for the determinant/cofactor products to avoid float error,
    and Sarrus' rule for the 3x3 determinant. Raises ValueError for a singular matrix
    or an unsupported size. (The ``or 0.0`` on each result entry normalises -0.0.)
    Fixes: the original bound ``Decimal``/the determinant/the work matrices to the
    obfuscated name ``UpperCamelCase`` but then read ``d``, ``matrix``, ``determinant``,
    ``swapped_matrix``, ``cofactor_matrix``, ``inverse_matrix`` — all unbound.
    """
    matrix = UpperCamelCase_
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""" )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""" )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 343 | import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# NOTE(review): the obfuscation renamed every module constant to the same name
# ``_SCREAMING_SNAKE_CASE``, so each assignment below clobbers the previous one and only
# the max-sizes dict survives. The tokenizer class expects VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — all undefined.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# vocab/merges file names (originally VOCAB_FILES_NAMES)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
# per-checkpoint download URLs (originally PRETRAINED_VOCAB_FILES_MAP)
_SCREAMING_SNAKE_CASE = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# max positional embeddings per checkpoint (originally PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES)
_SCREAMING_SNAKE_CASE = {
    """allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase( ) -> Dict[int, str]:
    """Return the GPT-2 byte -> unicode-character table used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes map to characters starting at
    U+0100, so every byte has a visible, non-whitespace representative. The result has
    exactly 256 entries and is cached (the table is fixed).
    Fixes: the original assigned the byte list to the obfuscated name ``UpperCamelCase``
    and then read ``bs``/``cs``/``n`` (NameError); its ``-> List[str]`` annotation was
    also wrong — the function returns a dict.
    """
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def lowercase( UpperCamelCase_ ):
    """Return the set of adjacent symbol pairs in ``word`` (a sequence of symbols).

    E.g. "abc" -> {("a", "b"), ("b", "c")}; used to find BPE merge candidates.
    Fixes: the original assigned to the obfuscated name ``UpperCamelCase`` and then read
    ``word``/``pairs``/``prev_char`` (NameError); the wrong ``-> List[str]`` annotation
    (the function returns a set of tuples) was dropped.
    """
    word = UpperCamelCase_
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ):
"""simple docstring"""
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_ ):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_ )
UpperCamelCase = """ """.join(lowerCamelCase_ )
UpperCamelCase = word
return word
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_ ):
UpperCamelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = """""".join(lowerCamelCase_ )
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCamelCase = 0
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCamelCase = """ """ + text
return (text, kwargs)
    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ):
        """Pad encoded inputs, additionally padding ``global_attention_mask``.

        Delegates the standard padding to the parent ``_pad`` and then pads
        ``global_attention_mask`` (if present and an attention mask is
        requested) to the length of the first model input, using ``-1`` as
        the fill value.

        NOTE(review): the parameter list appears garbled (five parameters all
        share one name — a SyntaxError) and several results are assigned to a
        throwaway name while later lines read ``encoded_inputs``,
        ``return_attention_mask``, ``needs_to_be_padded`` and ``difference``;
        presumably the parameters are (encoded_inputs, max_length,
        padding_strategy, pad_to_multiple_of, return_attention_mask) —
        confirm against the base tokenizer's ``_pad`` signature.
        """
        # Standard padding first (lengths, pad tokens, attention mask).
        UpperCamelCase = super()._pad(
            encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCamelCase = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            # Reference input used to determine the padded target length.
            UpperCamelCase = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
            if needs_to_be_padded:
                # Number of positions to fill so lengths match.
                UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCamelCase = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCamelCase = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    # Any padding side other than "right"/"left" is invalid.
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
| 343 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase( scheduler , num_steps=10 ):
    """Step *scheduler* ``num_steps`` times and collect the learning rates.

    Args:
        scheduler: object exposing ``get_lr() -> list`` and ``step()``.
        num_steps: number of scheduler steps to record (default 10).

    Returns:
        List of the first learning rate reported before each step.
    """
    # NOTE(review): the original declared two parameters with the same name
    # (a SyntaxError) and never bound `lrs`/`scheduler`; reconstructed.
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def lowercase( scheduler , num_steps=10 ):
    """Step *scheduler* ``num_steps`` times, round-tripping its state halfway.

    Like ``unwrap_schedule`` but, at the midpoint, saves the scheduler's
    ``state_dict`` with ``torch.save``, reloads it with ``torch.load`` and
    restores it — so the collected learning rates also exercise
    save/reload of the schedule.

    Args:
        scheduler: object exposing ``get_lr``/``step``/``state_dict``/``load_state_dict``.
        num_steps: number of scheduler steps to record (default 10).

    Returns:
        List of the first learning rate reported before each step.
    """
    # NOTE(review): original had duplicate parameter names (SyntaxError),
    # never bound `lrs`, and built the checkpoint path from the wrong name;
    # reconstructed to use the temporary directory.
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Tests that AdamW and Adafactor drive a parameter to a fixed target.

    NOTE(review): the method signatures repeat a single parameter name
    (SyntaxErrors) and intermediate results are assigned to a throwaway
    name while later lines read ``w``/``target``/``criterion``/``optimizer``/
    ``loss`` — the code looks machine-garbled; verify against the upstream
    optimization tests before relying on it.
    """
    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
        """Assert two float lists are element-wise equal within a delta."""
        self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
        for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """AdamW with a high LR should converge a 3-vector to the target."""
        UpperCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ )
        UpperCamelCase = torch.tensor([0.4, 0.2, -0.5] )
        UpperCamelCase = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        UpperCamelCase = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            UpperCamelCase = criterion(lowerCamelCase_ , lowerCamelCase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def lowerCamelCase_ ( self : str ):
        """Adafactor (fixed LR, no relative step) should converge likewise."""
        UpperCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ )
        UpperCamelCase = torch.tensor([0.4, 0.2, -0.5] )
        UpperCamelCase = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        UpperCamelCase = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCamelCase_ , weight_decay=0.0 , relative_step=lowerCamelCase_ , scale_parameter=lowerCamelCase_ , warmup_init=lowerCamelCase_ , )
        for _ in range(1000 ):
            UpperCamelCase = criterion(lowerCamelCase_ , lowerCamelCase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Checks every LR schedule factory against a table of expected LRs,
    both when stepped normally and after a save/reload round-trip.

    NOTE(review): class attributes reference ``m`` which is not bound under
    that name here, method signatures repeat parameter names (SyntaxErrors),
    and loop results are assigned to a throwaway name while later lines
    read ``scheduler``/``lrs``/``expected_learning_rates`` — likely
    machine-garbled; compare with the upstream scheduler tests.
    """
    # Tiny model/optimizer shared by all schedule checks; ``num_steps`` is
    # the number of LRs compared per schedule.
    __lowerCAmelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
    __lowerCAmelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    __lowerCAmelCase = 10
    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Any=None ):
        """Assert two float lists are element-wise equal within ``tol``."""
        self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
        for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ , msg=lowerCamelCase_ )
    def lowerCamelCase_ ( self : List[str] ):
        """Run every scheduler factory against its expected LR trajectory."""
        UpperCamelCase = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        UpperCamelCase = {
            get_constant_schedule: ({}, [1_0.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"""num_warmup_steps""": 4},
                [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, """num_cycles""": 2},
                [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
                [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
            ),
            get_inverse_sqrt_schedule: (
                {"""num_warmup_steps""": 2},
                [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
            ),
        }
        for scheduler_func, data in scheds.items():
            UpperCamelCase , UpperCamelCase = data
            # Fresh scheduler; check it reports exactly one LR per step.
            UpperCamelCase = scheduler_func(self.optimizer , **lowerCamelCase_ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            UpperCamelCase = unwrap_schedule(lowerCamelCase_ , self.num_steps )
            self.assertListAlmostEqual(
                lowerCamelCase_ , lowerCamelCase_ , tol=1E-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
            # Second pass: wrap the lambdas so the schedule pickles, then
            # verify LRs survive a save/reload of the scheduler state.
            UpperCamelCase = scheduler_func(self.optimizer , **lowerCamelCase_ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(lowerCamelCase_ ) # wrap to test picklability of the schedule
            UpperCamelCase = unwrap_and_save_reload_schedule(lowerCamelCase_ , self.num_steps )
            self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ , msg=f"""failed for {scheduler_func} in save and reload""" )
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = fn
def __call__( self : int , *lowerCamelCase_ : int , **lowerCamelCase_ : str ):
"""simple docstring"""
return self.fn(*lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = list(map(self , scheduler.lr_lambdas ) )
| 343 | import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_SCREAMING_SNAKE_CASE = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def lowercase( ) -> Dict:
    """Pick two random hands from SORTED_HANDS and the expected outcome.

    ``SORTED_HANDS`` is ordered by increasing strength (per the index
    comparison below), so the relative order of the two chosen indices
    decides the result: lower index -> "Loss", equal -> "Tie",
    higher -> "Win".

    Returns:
        Tuple ``(hand, other, expected)`` of two hand strings and the
        expected result of ``hand.compare_with(other)``.
    """
    # NOTE(review): the original discarded both random indices into a single
    # throwaway name while later lines read `play`/`oppo`; reconstructed.
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def lowercase( UpperCamelCase_ = 100 ) -> List[Any]:
    """Yield ``UpperCamelCase_`` random (hand, other, expected) triples.

    Returns a lazy generator; nothing is evaluated until it is iterated.

    NOTE(review): the body calls ``generate_random_hand``, but the sibling
    generator function above is defined under a different name in this
    file — confirm the intended reference before running.
    """
    return (generate_random_hand() for _ in range(UpperCamelCase_ ))
# NOTE(review): in all of the parametrized tests below, each `def lowercase`
# repeats a single parameter name (a SyntaxError), the bodies read names
# (`expected`, `card_values`, `player`) that the garbled signatures do not
# bind, and the second argument of `parametrize` references an undefined
# module name — presumably the constant tables defined above. Verify
# against the upstream poker-hand test module.
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
    """Each hand's _is_flush() must match the expected flag."""
    assert PokerHand(UpperCamelCase_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
    """Each hand's _is_straight() must match the expected flag."""
    assert PokerHand(UpperCamelCase_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
    """Five-high-straight detection and the resulting card values."""
    UpperCamelCase = PokerHand(UpperCamelCase_ )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
    """Pair/trips/quads detection (_is_same_kind) per hand."""
    assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
    """Overall hand-type classification per hand."""
    assert PokerHand(UpperCamelCase_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
    """compare_with on the fixed hand-vs-hand expectation table."""
    assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
    """compare_with on randomly generated hand pairs."""
    assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
def lowercase( ) -> Dict:
    """Shuffled PokerHand objects must sort back into SORTED_HANDS order.

    Builds hands in canonical order, shuffles a copy, sorts it, and checks
    the sorted sequence matches the original position by position.
    """
    # NOTE(review): the original never passed the shuffled copy to sorted()
    # (it referenced an undefined name), so the shuffle had no effect on the
    # comparison; reconstructed so the sort is actually exercised.
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def lowercase( ) -> Union[str, Any]:
    """A five-high straight must sort below an ordinary six-high straight."""
    # Test that five high straights are compared correctly.
    # NOTE(review): `reverse=` was bound to an undefined module name; the
    # assertion expects the stronger hand first, so the flag must be True.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase( ) -> str:
    """_is_five_high_straight must be idempotent and not mutate card values."""
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    # NOTE(review): the original assigned all three values to one throwaway
    # name while the loop body read `pokerhand`/`expected`/
    # `expected_card_values`; reconstructed.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def lowercase( ) -> int:
    """Project Euler problem 54: count player-1 wins in poker_hands.txt.

    Each line of the fixture holds two 5-card hands (columns 0-13 and 15+);
    player 1 must win exactly 376 of the 1000 rounds.
    """
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    # NOTE(review): the original took dirname() of an undefined name instead
    # of __file__ and discarded every intermediate into one throwaway name;
    # reconstructed.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , """poker_hands.txt""" )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 343 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_SCREAMING_SNAKE_CASE = """bart"""
_SCREAMING_SNAKE_CASE = True
@st.cache(allow_output_mutation=UpperCamelCase_ )
def lowercase( ) -> List[Any]:
    """Load the retrieval (retribert) and answer-generation (BART/T5) models.

    Returns ``(qar_tokenizer, qar_model, sas_tokenizer, sas_model)``; the
    dense retriever pair is ``(None, None)`` when ``LOAD_DENSE_INDEX`` is off.

    NOTE(review): the cache decorator's argument and several assignment
    targets reference names not bound here (results go to a throwaway name
    while later lines read ``qar_model``/``sas_model``/``save_dict``) —
    looks machine-garbled; compare with the upstream ELI5 demo app.
    """
    if LOAD_DENSE_INDEX:
        # Dense question/answer retriever, moved to the first GPU.
        UpperCamelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        UpperCamelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        UpperCamelCase = qar_model.eval()
    else:
        UpperCamelCase , UpperCamelCase = (None, None)
    if MODEL_TYPE == "bart":
        # BART answer generator with a fine-tuned checkpoint loaded on top.
        UpperCamelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        UpperCamelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        UpperCamelCase = sas_model.eval()
    else:
        # Fallback: small T5 generator built from a saved file.
        UpperCamelCase , UpperCamelCase = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase_ )
def lowercase( ) -> Union[str, Any]:
    """Load the Wiki40b passages, the FAISS GPU index and the ES client.

    Returns ``(wikiaab_passages, wikiaab_gpu_index_flat, es_client)``; the
    first two are ``None`` when ``LOAD_DENSE_INDEX`` is off.

    NOTE(review): assignment targets are garbled (results discarded into a
    throwaway name while later lines read ``wikiaab_passages`` etc.) and the
    index_cpu_to_gpu arguments reference an unbound name — verify against
    the upstream ELI5 demo app.
    """
    if LOAD_DENSE_INDEX:
        UpperCamelCase = faiss.StandardGpuResources()
        UpperCamelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        # Memory-mapped precomputed 128-d passage embeddings (one row per passage).
        UpperCamelCase = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
        # Inner-product flat index, moved onto GPU 1.
        UpperCamelCase = faiss.IndexFlatIP(128 )
        UpperCamelCase = faiss.index_cpu_to_gpu(UpperCamelCase_ , 1 , UpperCamelCase_ )
        wikiaab_gpu_index_flat.add(UpperCamelCase_ ) # TODO fix for larger GPU
    else:
        UpperCamelCase , UpperCamelCase = (None, None)
    # Sparse retrieval client (local ElasticSearch).
    UpperCamelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase_ )
def lowercase( ) -> str:
    """Load the ELI5 training split and a FAISS index over its questions.

    Returns ``(elia_train, eli5_train_q_index)`` where the index is built
    from memory-mapped 128-d question embeddings.

    NOTE(review): assignment targets are garbled (results discarded into a
    throwaway name while later lines read ``elia``/``elia_train``/
    ``eli5_train_q_index``) — compare with the upstream ELI5 demo app.
    """
    UpperCamelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    UpperCamelCase = elia["""train_eli5"""]
    # Memory-mapped precomputed question embeddings, one row per example.
    UpperCamelCase = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
    UpperCamelCase = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(UpperCamelCase_ )
    return (elia_train, eli5_train_q_index)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_indexes()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_models()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_train_data()
def lowercase( question , n_results=10 ):
    """Return the *n_results* ELI5 training examples nearest to *question*.

    Embeds the question and runs a max-inner-product search against the
    precomputed index over ELI5 training questions.

    NOTE(review): the original signature repeated a parameter name
    (SyntaxError) and indexed the dataset with the function parameter
    instead of the search-result ids; reconstructed. The embedding-model
    arguments are presumably the module-level retriever tokenizer/model —
    confirm against the surrounding app setup.
    """
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    # I[0] holds the row ids of the nearest training questions.
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def lowercase( UpperCamelCase_ , UpperCamelCase_="wiki40b" , UpperCamelCase_="dense" , UpperCamelCase_=10 ) -> Dict:
    """Fetch supporting passages for a question and build the generator input.

    Depending on ``method`` ("dense" FAISS vs. sparse ElasticSearch) and
    ``source`` ("none" yields empty support), retrieves passages and returns
    ``(question_doc, support_list)`` where ``question_doc`` is the
    "question: ... context: ..." string fed to the seq2seq model.

    NOTE(review): the parameter list repeats one name (SyntaxError) and the
    bodies read ``source``/``method``/``hit_lst``/``support_list`` that the
    garbled signature/assignments do not bind — compare with the upstream
    ELI5 demo app.
    """
    if source == "none":
        # No retrieval: 10 empty pseudo-passages joined by the <P> separator.
        UpperCamelCase , UpperCamelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            UpperCamelCase , UpperCamelCase = query_qa_dense_index(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        else:
            UpperCamelCase , UpperCamelCase = query_es_index(
                UpperCamelCase_ , UpperCamelCase_ , index_name="""english_wiki40b_snippets_100w""" , n_results=UpperCamelCase_ , )
    # Normalize hits to (title, section, score, passage_text) tuples.
    UpperCamelCase = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    UpperCamelCase = """question: {} context: {}""".format(UpperCamelCase_ , UpperCamelCase_ )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        # Tensors and tokenizers are unhashable for streamlit's cache; map
        # them to a constant so caching keys ignore them.
        torch.Tensor: (lambda UpperCamelCase_ : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase_ : None),
    } )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=64 , UpperCamelCase_=256 , UpperCamelCase_=False , UpperCamelCase_=2 , UpperCamelCase_=0.9_5 , UpperCamelCase_=0.8 ) -> Optional[int]:
    """Generate one answer for a prepared question document.

    Runs ``qa_sas_generate`` under ``torch.no_grad`` and returns
    ``(answer, support_list)``.

    NOTE(review): the parameter list repeats one name (SyntaxError) and the
    body reads ``answer``/``support_list`` that are never bound under those
    names — presumably (question_doc, sas_model, sas_tokenizer, min_len,
    max_len, sampling, n_beams, top_p, temp); confirm against the upstream
    ELI5 demo app.
    """
    with torch.no_grad():
        UpperCamelCase = qa_sas_generate(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_answers=1 , num_beams=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ , do_sample=UpperCamelCase_ , temp=UpperCamelCase_ , top_p=UpperCamelCase_ , top_k=UpperCamelCase_ , max_input_length=1024 , device="""cuda:0""" , )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_SCREAMING_SNAKE_CASE = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_SCREAMING_SNAKE_CASE = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_SCREAMING_SNAKE_CASE = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_SCREAMING_SNAKE_CASE = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Demo options""")
if demo_options:
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_SCREAMING_SNAKE_CASE = action_list.index(action_st)
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_SCREAMING_SNAKE_CASE = show_type == """Show full text of passages"""
else:
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_SCREAMING_SNAKE_CASE = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_SCREAMING_SNAKE_CASE = """wiki40b"""
_SCREAMING_SNAKE_CASE = """dense"""
_SCREAMING_SNAKE_CASE = """beam"""
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 6_4
_SCREAMING_SNAKE_CASE = 2_5_6
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Generation options""")
if generate_options:
_SCREAMING_SNAKE_CASE = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_SCREAMING_SNAKE_CASE = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_SCREAMING_SNAKE_CASE = None
# start main text
_SCREAMING_SNAKE_CASE = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_SCREAMING_SNAKE_CASE = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_SCREAMING_SNAKE_CASE = st.text_input("""Enter your question here:""", """""")
else:
_SCREAMING_SNAKE_CASE = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_SCREAMING_SNAKE_CASE = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_SCREAMING_SNAKE_CASE = support_list[:1_0]
_SCREAMING_SNAKE_CASE = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_SCREAMING_SNAKE_CASE = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_SCREAMING_SNAKE_CASE = res[1].strip()
if sec_titles == "":
_SCREAMING_SNAKE_CASE = """[{}]({})""".format(res[0], wiki_url)
else:
_SCREAMING_SNAKE_CASE = sec_titles.split(""" & """)
_SCREAMING_SNAKE_CASE = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_SCREAMING_SNAKE_CASE = find_nearest_training(question)
_SCREAMING_SNAKE_CASE = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_SCREAMING_SNAKE_CASE = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_SCREAMING_SNAKE_CASE = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 343 | import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """Configuration for XLNet models.

    Stores the hyper-parameters used to instantiate an XLNet model; inherits
    serialization behaviour from `PretrainedConfig`.

    NOTE(review): the original block named the base class and all three class
    attributes `__lowerCAmelCase`, and named every `__init__` parameter
    `lowerCamelCase_` — a duplicate-argument SyntaxError — while the body read
    the real names (`vocab_size`, `d_model`, ...). The intended names are
    restored from what the body reads; attribute assignments now target
    `self` instead of a throwaway local.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct an XLNet configuration.

        Raises:
            ValueError: if ``d_model`` is not divisible by ``n_head``, or if a
                ``d_head`` kwarg contradicts ``d_model // n_head``.
        """
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        # Each attention head gets an equal slice of the model dimension.
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})"""
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        # `use_cache` was renamed to `use_mems_eval`; honour the old kwarg
        # with a deprecation warning.
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # XLNet imposes no fixed maximum sequence length; -1 signals "unlimited".
        # NOTE(review): the original getter was named `lowerCamelCase_`, so the
        # `@max_position_embeddings.setter` decorator below referenced an
        # undefined name; the property is renamed to match its setter.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# NOTE(review): the original assigned every constant below to the same
# placeholder name `_SCREAMING_SNAKE_CASE`, so each assignment clobbered the
# previous one and the names the tokenizer class actually reads (`logger`,
# `VOCAB_FILES_NAMES`, ...) were never defined. The intended names are
# restored from the references in the class below.
logger = logging.get_logger(__name__)

# Name of the serialized SentencePiece model inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

# `None` means the checkpoints impose no fixed positional-embedding size.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class SCREAMING_SNAKE_CASE_(PreTrainedTokenizer):
    """XLNet tokenizer backed by a SentencePiece model (``spiece.model``).

    Pads on the left, appends ``<sep> <cls>`` at the end of sequences, and
    optionally lower-cases / strips accents during pre-processing.

    NOTE(review): restored from a placeholder-obfuscated original in which the
    base class was the undefined name `__lowerCAmelCase`, several methods
    declared every parameter as `lowerCamelCase_` (a duplicate-argument
    SyntaxError), attribute assignments targeted a throwaway local, and
    `convert_tokens_to_string` called ``.replace(tokens, " ")`` on a list (a
    TypeError) where `SPIECE_UNDERLINE` was intended. Names are recovered
    from the values the bodies read.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space
        # before it (lstrip) but not after.
        # NOTE(review): lstrip/rstrip flags were obfuscated to a parameter
        # name in the original; True/False restored per upstream convention.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # XLNet uses token-type id 3 for padding (see the SEG_ID_* constants).
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # re-load it in __setstate__ from `vocab_file`.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalise raw text before SentencePiece: collapse whitespace,
        unify quote styles, optionally strip accents and lower-case."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            # NFKD splits accented characters into base + combining marks,
            # which are then filtered out.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string into SentencePiece pieces, re-splitting pieces
        like ``"9,"`` so a trailing comma is separated from the digits."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the spurious leading word-boundary marker that
                    # re-encoding introduced.
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) into an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) into a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into text, mapping the SentencePiece word-boundary
        marker back to a plain space."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """An XLNet sequence has the format ``A <sep> <cls>`` or
        ``A <sep> B <sep> <cls>`` (special tokens go at the end)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: sequence A and its <sep> -> 0, sequence B and its
        <sep> -> 1, the trailing <cls> -> 2 (XLNet's CLS segment id)."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialise) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The source file is gone (e.g. the tokenizer was unpickled):
            # dump the in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Fixture paths used by the tests below.
# NOTE(review): the original assigned all three paths to the same placeholder
# name `_SCREAMING_SNAKE_CASE`, so the first two were immediately clobbered;
# distinct names are restored so each fixture remains addressable.
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Unit tests for the `AutoFeatureExtractor` factory API.

    NOTE(review): every method below is named `lowerCamelCase_`, so each
    definition shadows the previous one and unittest will never discover a
    `test_*` method — none of these tests can currently run. Several bodies
    also read a bare `lowerCamelCase_` that is undefined in their scope
    (originally fixture-path constants or the `CustomConfig` /
    `CustomFeatureExtractor` helpers imported above, depending on the call
    site). The intended identifiers are not recoverable from this file alone,
    so the code is left byte-identical and only documented.
    """
    def lowerCamelCase_ ( self : Any ):
        """Per-test setup; initialises a scratch counter (result discarded —
        it is bound to a throwaway local, not to `self`)."""
        UpperCamelCase = 0
    def lowerCamelCase_ ( self : str ):
        """Loads a feature extractor from a Hub model id and checks its type."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        # NOTE(review): asserts an undefined name against itself; the intended
        # arguments (extractor instance, expected class) are lost.
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Loads a feature extractor from a local config path (originally one
        of the module-level fixture constants — TODO confirm which)."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : int ):
        """Checks that a model `config.json` alone (without a
        `feature_extractor_type` key) is enough to load the processor locally,
        and that the private `_processor_class` field is not serialised."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
            # save in new folder
            model_config.save_pretrained(lowerCamelCase_ )
            config.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
            # make sure private variable is not incorrectly saved
            UpperCamelCase = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Loads a feature extractor directly from a config file path."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Asserts a helpful error for an invalid model identifier."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
    def lowerCamelCase_ ( self : Dict ):
        """Asserts a helpful error for a bogus revision (branch/tag/commit)."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Asserts a helpful error when the repo lacks preprocessor_config.json."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Checks trust_remote_code gating for a Hub-hosted dynamic feature
        extractor, and that it survives a save/reload round-trip."""
        # NOTE(review): the first assertRaises presumably expected a ValueError
        # when remote code is neither enabled nor disabled — confirm upstream.
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Registers a custom config/extractor pair with the auto-API, checks
        double registration raises, and cleans the registries in `finally`."""
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase_ ):
                AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCamelCase_ )
                UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
                self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
        finally:
            # Always undo the registration so other tests see clean mappings.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def lowerCamelCase_ ( self : Any ):
        """Checks local-vs-remote resolution priority when a dynamic feature
        extractor exists both locally (registered) and on the Hub."""
        # NOTE(review): this nested class originally subclassed a local
        # feature-extractor class and set a marker attribute (e.g. is_local);
        # both names were obfuscated to placeholders — confirm upstream.
        class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
            __lowerCAmelCase = True
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # If remote code is not set, the default is to use local
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
        finally:
            # Always undo the registration so other tests see clean mappings.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build and simulate a GHZ/Bell state on `qubits` qubits.

    Returns the measured counts over 1000 shots; for an entangled state only
    the all-zeros and all-ones bitstrings should appear.

    NOTE(review): the original was named `lowercase` while the `__main__`
    guard calls `quantum_entanglement`; it also targeted every CNOT at index
    `qubits` (out of range) and passed the qubit count to `qiskit.execute`
    instead of the circuit and backend. All three are fixed here.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate: entangle qubit i with its neighbour i - 1
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase( UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese(word: str) -> int:
    """Return 1 if every character of `word` is a CJK ideograph, else 0.

    NOTE(review): renamed from the shadowing placeholder `lowercase` to the
    name its callers in this file use; the original also passed the whole
    word (not the code point) to `_is_chinese_char`.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collect the distinct multi-character, fully-Chinese words in `tokens`.

    NOTE(review): renamed from the shadowing placeholder `lowercase` to the
    name its caller in this file uses.
    """
    word_set = set()

    for token in tokens:
        # Only tokens longer than one character made entirely of CJK
        # ideographs count as "Chinese words".
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefix ``##`` onto the non-initial characters of every whole Chinese
    word found in `bert_tokens`, mimicking WordPiece sub-token marking.

    Mutates `bert_tokens` in place and returns it.

    NOTE(review): renamed from the shadowing placeholder `lowercase`; the
    original also discarded the rewritten tokens into a throwaway local.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Greedy longest-match: try the longest candidate word first.
            longest = min(end - start, max_word_len)
            for i in range(longest, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer) -> List[List[int]]:
    """For each line, list the positions of BERT sub-tokens that continue a
    Chinese word (reference ids for whole-word masking).

    NOTE(review): renamed from the shadowing placeholder `lowercase`; local
    names restored from the values the body reads.
    """
    ltp_res = []

    # Segment in batches of 100 lines to bound LTP memory use.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read `args.file_name`, compute whole-word-masking reference ids and
    write them, one JSON list per line, to `args.save_path`.

    NOTE(review): renamed from the shadowing placeholder `lowercase` to the
    name the `__main__` guard calls.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # NOTE(review): the original assigned both the parser and the parsed args
    # to the placeholder `_SCREAMING_SNAKE_CASE` while the following lines
    # referenced `parser` and `args`; the intended names are restored.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for `MgpstrTokenizer`.

    NOTE(review): restored from an obfuscated original in which the mixin
    base and the four class attributes were all named `__lowerCAmelCase`
    (the base was therefore an undefined name) and every method — including
    `setUp` — was named `lowerCamelCase_`, so unittest could never discover
    or run the tests. Names are recovered from the bodies; the targets of the
    two parameterless `@unittest.skip` methods are a best-effort mapping —
    verify against the upstream test file.
    """

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # MGP-STR is character-level; text round-trips unchanged.
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds small random ViT configs/inputs and checks Flax ViT outputs.

    NOTE(review): restored from a placeholder-obfuscated original whose
    `__init__` declared every parameter as `lowerCamelCase_` (a duplicate-
    argument SyntaxError) and whose four methods all shared one name. The
    sibling test class calls `FlaxViTModelTester(self)` and
    `self.model_tester.prepare_config_and_inputs*()`, which fixes the
    intended class and method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Create a random pixel batch and a matching small ViT config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Run the base model and verify the hidden-state shape."""
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Run the classification head and verify the logits shape; also
        exercise single-channel (greyscale) input."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Common Flax ViT model tests (base model + image-classification head).

    NOTE(review): identifiers were machine-obfuscated — the mixin base class and
    all class attributes are named ``__lowerCAmelCase`` and many method bodies
    reference names that are never bound (e.g. ``model_tester``), so this class
    cannot run as written.
    """

    __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def lowerCamelCase_ ( self : Optional[int] ):
        """Set up the model tester and config tester (ViT has no text modality)."""
        UpperCamelCase = FlaxViTModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
    def lowerCamelCase_ ( self : List[Any] ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Exercise the base model forward pass via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Exercise the image-classification head via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Any ):
        """Check that every model's __call__ takes `pixel_values` first."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
    def lowerCamelCase_ ( self : str ):
        """Check jitted and un-jitted forward passes produce same-shaped outputs."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
                UpperCamelCase = model_class(lowerCamelCase_ )
                @jax.jit
                def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ):
                    return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ )
                with self.subTest("""JIT Enabled""" ):
                    UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
                self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
                for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """Load the published checkpoint and run a dummy forward pass."""
        for model_class_name in self.all_model_classes:
            UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
            UpperCamelCase = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(lowerCamelCase_ )
| 343 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): obfuscation collapsed the module logger and the pretrained-config
# archive map onto one name — the dict assignment below clobbers the logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """SEW-D model configuration (speech model with disentangled attention).

    NOTE(review): machine-obfuscated — every ``__init__`` parameter shares the
    single name ``lowerCamelCase_`` (duplicate-argument SyntaxError) while the
    body reads the intended original names (``hidden_size``,
    ``feat_extract_norm``, ...), which are undefined under this signature.
    """

    __lowerCAmelCase = """sew-d"""
    def __init__( self : Union[str, Any] , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : str=768 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : str=12 , lowerCamelCase_ : str=3072 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : str=512 , lowerCamelCase_ : Union[str, Any]=256 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=("p2c", "c2p") , lowerCamelCase_ : Any="layer_norm" , lowerCamelCase_ : Optional[Any]="gelu_python" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Optional[Any]=0.0 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[str]=0.0_2 , lowerCamelCase_ : Optional[Any]=1E-7 , lowerCamelCase_ : Optional[int]=1E-5 , lowerCamelCase_ : Union[str, Any]="group" , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : int=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase_ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase_ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : str=128 , lowerCamelCase_ : Tuple=16 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=0.0_5 , lowerCamelCase_ : Dict=10 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Tuple=0.0 , lowerCamelCase_ : Tuple=10 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Any="mean" , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : str=256 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : str=1 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : int , ):
        """Validate the convolutional feature-extractor spec (conv_dim /
        conv_stride / conv_kernel lengths must agree) and store the model,
        SpecAugment, CTC and sequence-classification hyper-parameters."""
        super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
        UpperCamelCase = hidden_size
        UpperCamelCase = feat_extract_norm
        UpperCamelCase = feat_extract_activation
        UpperCamelCase = list(lowerCamelCase_ )
        UpperCamelCase = list(lowerCamelCase_ )
        UpperCamelCase = list(lowerCamelCase_ )
        UpperCamelCase = conv_bias
        UpperCamelCase = num_conv_pos_embeddings
        UpperCamelCase = num_conv_pos_embedding_groups
        UpperCamelCase = len(self.conv_dim )
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = intermediate_size
        UpperCamelCase = squeeze_factor
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = position_buckets
        UpperCamelCase = share_att_key
        UpperCamelCase = relative_attention
        UpperCamelCase = norm_rel_ebd
        UpperCamelCase = list(lowerCamelCase_ )
        UpperCamelCase = hidden_act
        UpperCamelCase = num_attention_heads
        UpperCamelCase = hidden_dropout
        UpperCamelCase = attention_dropout
        UpperCamelCase = activation_dropout
        UpperCamelCase = feat_proj_dropout
        UpperCamelCase = final_dropout
        UpperCamelCase = layer_norm_eps
        UpperCamelCase = feature_layer_norm_eps
        UpperCamelCase = initializer_range
        UpperCamelCase = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect."""
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        UpperCamelCase = apply_spec_augment
        UpperCamelCase = mask_time_prob
        UpperCamelCase = mask_time_length
        UpperCamelCase = mask_time_min_masks
        UpperCamelCase = mask_feature_prob
        UpperCamelCase = mask_feature_length
        UpperCamelCase = mask_feature_min_masks
        # ctc loss
        UpperCamelCase = ctc_loss_reduction
        UpperCamelCase = ctc_zero_infinity
        # sequence classification
        UpperCamelCase = use_weighted_layer_sum
        UpperCamelCase = classifier_proj_size
    @property
    def lowerCamelCase_ ( self : Any ):
        """Total hop length of the conv feature extractor: the product of all
        entries of ``conv_stride``."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 343 | import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
    """Helper that builds small LiLT configs/inputs and checks model outputs.

    NOTE(review): machine-obfuscated — the ``__init__`` signature declares every
    parameter with the same name (duplicate-argument SyntaxError) and the body
    reads the intended original names (``parent``, ``batch_size``, ...), which
    are undefined under this signature.
    """

    def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ):
        """Store the hyper-parameters used to build tiny test configs/inputs."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_input_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        UpperCamelCase = range_bbox
    def lowerCamelCase_ ( self : Dict ):
        """Build random input ids, legal bounding boxes (x0<=x1, y0<=y1),
        optional masks/labels and a config for the tests below."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCamelCase = bbox[i, j, 3]
                    UpperCamelCase = bbox[i, j, 1]
                    UpperCamelCase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCamelCase = bbox[i, j, 2]
                    UpperCamelCase = bbox[i, j, 0]
                    UpperCamelCase = t
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def lowerCamelCase_ ( self : List[Any] ):
        """Return a LiltConfig built from the stored hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
        """Run LiltModel with/without optional inputs and check output shapes."""
        UpperCamelCase = LiltModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ):
        """Run LiltForTokenClassification and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
        """Run LiltForQuestionAnswering and check start/end logits shapes."""
        UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowerCamelCase_ ( self : Dict ):
        """Return (config, inputs_dict) for the shared model tests."""
        # NOTE(review): the unpacking target reads `config_and_inputs`, but the
        # result above is bound to `UpperCamelCase` — NameError as written.
        UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) = config_and_inputs
        UpperCamelCase = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common PyTorch LiLT model tests (model / sequence / token / QA heads).

    NOTE(review): machine-obfuscated — the three mixin base classes and all
    class attributes share the name ``__lowerCAmelCase``, so this class cannot
    run as written.
    """

    __lowerCAmelCase = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __lowerCAmelCase = (
        {
            """feature-extraction""": LiltModel,
            """question-answering""": LiltForQuestionAnswering,
            """text-classification""": LiltForSequenceClassification,
            """token-classification""": LiltForTokenClassification,
            """zero-shot""": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
        """Pipeline-test filter hook: no pipeline tests are skipped for LiLT."""
        return True
    def lowerCamelCase_ ( self : List[Any] ):
        """Set up the model tester and config tester."""
        UpperCamelCase = LiltModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
    def lowerCamelCase_ ( self : Any ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self : Tuple ):
        """Exercise the base model forward pass."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """Exercise the model with each position-embedding type."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase = type
            self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Exercise the token-classification head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Exercise the question-answering head."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
    @slow
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Load the first published checkpoint and check it instantiates."""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: forward pass of a real LiLT checkpoint against
    hard-coded reference activations."""

    def lowerCamelCase_ ( self : List[str] ):
        """Run SCUT-DLVCLab/lilt-roberta-en-base on a tiny input and compare the
        first hidden-state values to reference numbers (atol=1e-3)."""
        UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ )
        UpperCamelCase = torch.Size([1, 2, 768] )
        UpperCamelCase = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , )
        self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
| 343 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
# Configure root logging at DEBUG and keep a module-level handle to the root
# logger; the test class below attaches a stdout handler to it.
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE = logging.getLogger()
def lowercase( ):
    """Parse ``sys.argv`` for the ``-f`` flag and return its value.

    pytest invokes test scripts with a ``-f <file>`` argument; this helper
    consumes it so it does not interfere with the example's own argv handling.

    Returns:
        The value passed to ``-f`` (or ``None`` if the flag is absent).
    """
    # Fix: the obfuscated original bound the parser to one name but called
    # `parser.add_argument(...)` on another, raising NameError at call time.
    # (The original `-> Optional[Any]` annotation also referenced `Optional`
    # without importing it, which would raise at definition time.)
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    """End-to-end tests for the DeeBERT example: two-stage training on the MRPC
    fixture, then evaluation per highway exit and with an entropy threshold.

    NOTE(review): the base class is the obfuscated ``__lowerCAmelCase``
    (presumably ``TestCasePlus``), so this class cannot run as written.
    """

    def lowerCamelCase_ ( self : int ):
        """Route the example's log output to stdout so pytest captures it."""
        UpperCamelCase = logging.StreamHandler(sys.stdout )
        logger.addHandler(lowerCamelCase_ )
    def lowerCamelCase_ ( self : str , lowerCamelCase_ : str ):
        """Run run_glue_deebert.main() with the given CLI args (single-GPU/CPU
        only) and assert every reported metric is at least 0.666."""
        UpperCamelCase = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""" )
            with patch.object(lowerCamelCase_ , """argv""" , lowerCamelCase_ ):
                UpperCamelCase = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(lowerCamelCase_ , 0.6_6_6 )
    @slow
    @require_torch_non_multi_gpu
    def lowerCamelCase_ ( self : Any ):
        """Train a two-stage DeeBERT on the MRPC fixture, then evaluate each
        highway exit and evaluate with an early-exit entropy of 0.1."""
        UpperCamelCase = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(lowerCamelCase_ )
        UpperCamelCase = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(lowerCamelCase_ )
        UpperCamelCase = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(lowerCamelCase_ )
| 343 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Helper that stores PoolFormer image-processor settings and exposes them
    as the dict used to construct the processor under test.

    NOTE(review): machine-obfuscated — every ``__init__`` parameter shares the
    name ``lowerCamelCase_`` (duplicate-argument SyntaxError) while the body
    reads the intended original names (``parent``, ``batch_size``, ...).
    """

    def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ):
        """Store processor settings; size/crop_size default to 30px."""
        UpperCamelCase = size if size is not None else {"""shortest_edge""": 30}
        UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = num_channels
        UpperCamelCase = min_resolution
        UpperCamelCase = max_resolution
        UpperCamelCase = do_resize_and_center_crop
        UpperCamelCase = size
        UpperCamelCase = crop_pct
        UpperCamelCase = crop_size
        UpperCamelCase = do_normalize
        UpperCamelCase = image_mean
        UpperCamelCase = image_std
    def lowerCamelCase_ ( self : Tuple ):
        """Return the kwargs dict used to build a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """PoolFormer image-processor tests: properties, from_dict round-trips, and
    batched/unbatched processing of PIL, numpy and torch inputs.

    NOTE(review): the mixin base class is the obfuscated ``__lowerCAmelCase``
    (presumably ``ImageProcessingSavingTestMixin``); several bodies reference
    unbound names (e.g. ``image_processing``), so the class cannot run as
    written.
    """

    __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
    def lowerCamelCase_ ( self : Any ):
        """Set up the settings helper used by all tests."""
        UpperCamelCase = PoolFormerImageProcessingTester(self )
    @property
    def lowerCamelCase_ ( self : int ):
        """The kwargs dict used to construct the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCamelCase_ ( self : int ):
        """Check the processor exposes every expected attribute."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Check from_dict honours defaults and explicit size/crop_size overrides."""
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
        self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Intentionally empty placeholder (kept for the common test suite)."""
        pass
    def lowerCamelCase_ ( self : Optional[int] ):
        """Process PIL images and check the output pixel_values shapes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , Image.Image )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Process numpy arrays and check the output pixel_values shapes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , np.ndarray )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def lowerCamelCase_ ( self : List[str] ):
        """Process torch tensors and check the output pixel_values shapes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 343 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
def lowercase( mass , velocity ) -> float:
    """Return the translational kinetic energy 0.5 * m * |v|**2 of a body.

    Args:
        mass: mass of the body; must be non-negative.
        velocity: velocity of the body (only its magnitude matters).

    Returns:
        The kinetic energy as a float.

    Raises:
        ValueError: if ``mass`` is negative.
    """
    # Fix: the obfuscated original declared two parameters with the same name,
    # which is a SyntaxError in Python; restored to `mass` and `velocity`.
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 343 | 1 |
from __future__ import annotations
import numpy as np
def lowercase( table ) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (no pivoting).

    Factors ``table`` into ``lower @ upper`` where ``lower`` is unit
    lower-triangular and ``upper`` is upper-triangular.

    Args:
        table: a square 2-D array-like.

    Returns:
        A ``(lower, upper)`` pair of ``np.ndarray``.

    Raises:
        ValueError: if ``table`` is not square.
        ArithmeticError: if a zero pivot is hit (the matrix has no LU
            decomposition without row pivoting).
    """
    # Fix: the obfuscated original collapsed every local variable onto the
    # single name `UpperCamelCase`, destroying the algorithm; restored the
    # standard Doolittle recurrences.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Strictly-below-diagonal entries of `lower`:
        # lower[i][j] = (A[i][j] - sum_{k<j} lower[i][k]*upper[k][j]) / upper[j][j]
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1  # unit diagonal (Doolittle convention)
        # On-and-above-diagonal entries of `upper`:
        # upper[i][j] = A[i][j] - sum_{k<i} lower[i][k]*upper[k][j]
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 343 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): obfuscation collapsed the module logger and the pretrained-config
# archive map onto one name — the dict assignment below clobbers the logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    """microsoft/trocr-base-handwritten""": (
        """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
    """Configuration of a TrOCR text decoder (see the microsoft/trocr-*
    checkpoints referenced in the archive map above).

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with one shared name (a SyntaxError), assigned three different class
    attributes to a single name, and inherited from an undefined symbol. The
    canonical attribute/parameter names were restored from the names the body
    reads on its right-hand sides.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store decoder hyper-parameters and forward the special-token ids to
        the ``PretrainedConfig`` base class."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 343 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """CPU-only smoke tests for accelerate's debug_launcher: it must be able to
    launch both the ops test script and the main test script."""

    def lowerCamelCase_ ( self : Any ):
        """Launch the full accelerate test script through debug_launcher."""
        debug_launcher(test_script.main )
    def lowerCamelCase_ ( self : str ):
        """Launch the ops test script through debug_launcher."""
        debug_launcher(test_ops.main )
| 343 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): obfuscation collapsed the module logger and the pretrained-config
# archive map onto one name — the dict assignment below clobbers the logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
    """Swin Transformer configuration (backbone-capable vision model).

    NOTE(review): machine-obfuscated — both base classes (presumably
    ``BackboneConfigMixin`` and ``PretrainedConfig``) and the ``model_type``
    attribute share the name ``__lowerCAmelCase``, and every ``__init__``
    parameter shares one name (duplicate-argument SyntaxError) while the body
    reads the intended original names (``image_size``, ``patch_size``, ...).
    """

    __lowerCAmelCase = """swin"""
    __lowerCAmelCase = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ):
        """Store the Swin architecture hyper-parameters, derive the final
        hidden size (embed_dim * 2**(num_stages - 1)) and the stage names, and
        align the backbone out_features/out_indices."""
        super().__init__(**lowerCamelCase_ )
        UpperCamelCase = image_size
        UpperCamelCase = patch_size
        UpperCamelCase = num_channels
        UpperCamelCase = embed_dim
        UpperCamelCase = depths
        UpperCamelCase = len(lowerCamelCase_ )
        UpperCamelCase = num_heads
        UpperCamelCase = window_size
        UpperCamelCase = mlp_ratio
        UpperCamelCase = qkv_bias
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = drop_path_rate
        UpperCamelCase = hidden_act
        UpperCamelCase = use_absolute_embeddings
        UpperCamelCase = layer_norm_eps
        UpperCamelCase = initializer_range
        UpperCamelCase = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
        UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
        UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return 1E-4
| 343 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , )
assert hasattr(self , """env""" )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCamelCase = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase_ , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self.create_estimator(lowerCamelCase_ )
# run training
estimator.fit()
# result dataframe
UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
| 343 | import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = """Normal"""
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = """Abnormality detected"""
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 343 | from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
pass
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
def __iter__( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self
UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase_ )
yield node.data
UpperCamelCase = node.next_node
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Node(1)
_SCREAMING_SNAKE_CASE = Node(2)
_SCREAMING_SNAKE_CASE = Node(3)
_SCREAMING_SNAKE_CASE = Node(4)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = root_node.next_node
print(root_node.has_loop) # True
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = Node(1)
print(root_node.has_loop) # False
| 343 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = """trocr"""
__lowerCAmelCase = ["""past_key_values"""]
__lowerCAmelCase = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = activation_function
UpperCamelCase = max_position_embeddings
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = init_std
UpperCamelCase = decoder_layerdrop
UpperCamelCase = use_cache
UpperCamelCase = scale_embedding
UpperCamelCase = use_learned_position_embeddings
UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 343 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , )
assert hasattr(self , """env""" )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = {
"""enabled""": True,
"""processes_per_host""": 8,
}
UpperCamelCase = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ):
"""simple docstring"""
TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self.create_estimator(lowerCamelCase_ )
# run training
estimator.fit()
# result dataframe
UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
| 343 | 1 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_SCREAMING_SNAKE_CASE = 1.054571817E-34 # unit of ℏ : J * s
_SCREAMING_SNAKE_CASE = 3E8 # unit of c : m * s^-1
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> dict[str, float]:
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
UpperCamelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
UpperCamelCase = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
UpperCamelCase = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 343 | 1 |
import pytest
_SCREAMING_SNAKE_CASE = """__dummy_dataset1__"""
_SCREAMING_SNAKE_CASE = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def lowercase( ) -> List[str]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase( ) -> Optional[int]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase = dataset_loading_script_name
UpperCamelCase = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=UpperCamelCase_ )
UpperCamelCase = script_dir / f"""{script_name}.py"""
with open(UpperCamelCase_ , """w""" ) as f:
f.write(UpperCamelCase_ )
return str(UpperCamelCase_ )
| 343 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = ShapEPipeline
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase = False
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_prior
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = self.dummy_tokenizer
UpperCamelCase = self.dummy_renderer
UpperCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
UpperCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase = output.images[0]
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch_device == """cpu"""
UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase = batch_size * [inputs[key]]
UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = pipe(
"""a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343 | 1 |
# Lazy-import scaffolding for the BlenderbotSmall model family.  Heavy backend
# modules (PyTorch / TensorFlow / Flax) are only imported on first attribute
# access via _LazyModule, keeping `import transformers` fast.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it exports.  Backend-dependent
# entries are added below only when the corresponding backend is installed.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see real imports so names resolve at analysis time.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 | from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two adjacent sorted runs ``input_list[low:mid]`` and
    ``input_list[mid:high + 1]`` in place.

    Returns the (mutated) list so calls can be chained.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    # Repeatedly move the smaller head element; pop(0) keeps both runs sorted.
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # At most one run still has elements; it is already sorted, append it whole.
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort.

    Returns a sorted list; the caller's list is not modified (a working copy
    is sorted instead).
    """

    def _merge(items: list, low: int, mid: int, high: int) -> list:
        # Merge the sorted runs items[low:mid] and items[mid:high + 1] in place.
        merged = []
        left, right = items[low:mid], items[mid : high + 1]
        while left and right:
            merged.append((left if left[0] <= right[0] else right).pop(0))
        items[low : high + 1] = merged + left + right
        return items

    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)  # work on a copy so the argument survives

    # p is the current run width: merge pairs, then quadruples, then ...
    p = 2
    while p <= len(input_list):
        # Merge every adjacent pair of width-p/2 runs.
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = _merge(input_list, low, mid, high)
        # Final pass: merge the last two (possibly unequal) sorted runs.
        if p * 2 >= len(input_list):
            mid = i
            input_list = _merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin; an empty line sorts [].
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(iter_merge_sort(unsorted))
| 343 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_(FlavaImageProcessor):
    """Deprecated alias of :class:`FlavaImageProcessor`.

    Kept for backward compatibility only: it emits a ``FutureWarning`` on
    instantiation and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 343 | import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds a tiny BitConfig plus random inputs and shared model checks for the
    Bit test suite (name matches the references in the test classes below)."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],  # noqa: B006 — mutable defaults kept, read-only by convention
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],  # noqa: B006
        out_indices=[2, 3, 4],  # noqa: B006
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        # Random pixel values and (optionally) classification labels.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final feature map is downsampled by a factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None (defaults to last stage)
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for Bit.  Several generic tests are
    skipped because Bit takes only pixel values (no input_ids/inputs_embeds,
    no attention)."""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Norm layers must start as identity: weight 1, bias 0.
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO "two cats" fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against a pretrained Bit checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Generic backbone-contract tests, driven by BackboneTesterMixin."""

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 343 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` against ``main_target``: one point for every position
    whose character matches.  Returns ``(item, score)``."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at the same random point and swap the tails,
    producing two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random character of
    ``child`` with a random gene; otherwise return the child unchanged."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # randint upper bound is len(child), so the index spans -1..len-1
        # (-1 wraps to the last character) — kept from the upstream algorithm.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed mutated children from ``parent_1`` and random partners taken from
    the best ``N_SELECTED`` entries of ``population_score``."""
    pop = []
    # Generate more children proportionally to the fitness score (capped at 10 pairs).
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward ``target``.

    Returns ``(generation, total strings evaluated, best string)`` once an
    exact match is found.  Raises ValueError on inconsistent parameters.
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know it is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}"""
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection: breed from the fittest N_SELECTED parents.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle.  If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small
            # strings in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
    )
    genes_list = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
| 343 | from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds a tiny ResNetConfig plus random inputs and shared model checks
    for the TF ResNet test suite (name matches references in setUp below)."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — mutable defaults kept, read-only by convention
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for TF ResNet.  Several generic tests
    are skipped because ResNet takes only pixel values."""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO "two cats" fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against a pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 343 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE_(DiffusionPipeline):
    """Unconditional image generation using the stochastic sampler of
    Karras et al. (2022), driven by a KarrasVeScheduler."""

    # Components registered by register_modules in __init__.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 343 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion function below calls `logger.info(...)`,
# so the result of get_logger must be bound to the name `logger`.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE: must be named `rename_keys` — the conversion function iterates this list.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the value stored under key *old* to key *new* in *state_dict*, in place.

    The obfuscated original dropped the write-back (`state_dict[new] = val`), so the
    popped value was silently discarded; also restores the name the call sites use.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with timm-style backbone keys renamed to the HF layout.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; all other keys are copied unchanged,
    preserving the original insertion order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in-projection into separate q/k/v keys, in place.

    PyTorch's MultiheadAttention stores q, k and v as one (3*256, 256) matrix and one
    (3*256,) bias; the HF model expects them as three separate projections. The
    obfuscated original assigned every slice to a dead local instead of writing the
    new keys back into `state_dict`.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (a single fused matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO cats test image (network I/O)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original Conditional DETR weights into the HF structure.

    Loads the original model from torch hub, renames/splits its state-dict keys,
    verifies the converted model produces the same outputs on a test image, pushes
    the model to the hub, and saves model + image processor to
    ``pytorch_dump_folder_path``. Requires network access and a GPU-less torch.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models
    # use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): insert ".model" after the "conditional_detr" prefix; the
                # obfuscated source lost the exact target key — confirm against upstream.
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Script entry point: parse CLI args and run the conversion.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViTMSN configs and inputs and runs shape checks for the common tests.

    Restored name: `setUp` below instantiates `ViTMSNModelTester(self)`, and the
    obfuscated original had dropped every `self.` attribute assignment.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # f-prefix restored: the original printed the literal brace expressions
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for ViTMSN. ViTMSN does not use input_ids, inputs_embeds,
    attention_mask or seq_length, so some common tests are overridden/skipped.

    Restored: class name, mixin base classes and the distinct class-attribute names
    that the obfuscation had collapsed into a single identifier.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the pretrained facebook/vit-msn-small checkpoint."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 343 | from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    """Builds tiny TF ViTMAE configs and inputs and runs shape checks for the common tests.

    Restored name and `self.` attribute assignments lost to obfuscation; the test
    class's `setUp` instantiates `TFViTMAEModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio),
        # rounded above (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = TFViTMAEModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = outputs_dict[0].numpy()
UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ):
UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase_ ):
UpperCamelCase = v.numpy()
else:
UpperCamelCase = np.array(lowerCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.constant(lowerCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_(self):
    """Round-trip each `...MainLayer` through Keras H5 save/load and compare
    outputs before/after.

    Fix: the set-comprehension variables (`module`, `module_member_name`,
    `module_member`) and subsequent locals were collapsed to placeholders.
    """
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    tf_main_layer_classes = {
        module_member
        for model_class in self.all_model_classes
        for module in (import_module(model_class.__module__),)
        for module_member_name in dir(module)
        if module_member_name.endswith("MainLayer")
        # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
        and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
        for module_member in (getattr(module, module_member_name),)
        if isinstance(module_member, type)
        and tf.keras.layers.Layer in module_member.__bases__
        and getattr(module_member, "_keras_serializable", False)
    }
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
    noise = tf.convert_to_tensor(noise)
    inputs_dict.update({"noise": noise})

    for main_layer_class in tf_main_layer_classes:
        main_layer = main_layer_class(config)
        symbolic_inputs = {
            name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
        }
        model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
        outputs = model(inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            filepath = os.path.join(tmpdirname, "keras_model.h5")
            model.save(filepath)
            model = tf.keras.models.load_model(
                filepath, custom_objects={main_layer_class.__name__: main_layer_class})
            assert isinstance(model, tf.keras.Model)
            after_outputs = model(inputs_dict)
            self.assert_outputs_same(after_outputs, outputs)
@slow
def lowerCamelCase_(self):
    """save_pretrained/from_pretrained round-trip with fixed masking noise;
    outputs before and after must agree to 1e-5.

    Fixes: undefined locals restored; the NaN-zeroing statements
    (`out[np.isnan(out)] = 0`) had been reduced to bare `= 0` assignments;
    `saved_model=` had lost its literal (False per the upstream test).
    """
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

    for model_class in self.all_model_classes:
        model = model_class(config)
        model_input = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(model_input, noise=noise)
        if model_class.__name__ == "TFViTMAEModel":
            out_1 = outputs.last_hidden_state.numpy()
            out_1[np.isnan(out_1)] = 0  # NaNs compare unequal; zero them out
        else:
            out_1 = outputs.logits.numpy()
            out_1[np.isnan(out_1)] = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=False)
            model = model_class.from_pretrained(tmpdirname)
            after_outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = after_outputs["last_hidden_state"].numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = after_outputs["logits"].numpy()
                out_2[np.isnan(out_2)] = 0
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
def lowerCamelCase_(self):
    """Rebuild each model from `get_config()` (and from `model.config`), copy
    the weights over, and check the outputs match the original model.

    Fix: undefined locals (`config`, `model_input`, `model_config`,
    `new_model`, `after_outputs`) restored from their use sites.
    """
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

    for model_class in self.all_model_classes:
        model = model_class(config)
        model_input = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(model_input, noise=noise)
        model_config = model.get_config()
        # make sure that returned config is jsonifiable, which is required by keras
        json.dumps(model_config)
        new_model = model_class.from_config(model.get_config())
        # make sure it also accepts a normal config
        new_model = model_class.from_config(model.config)
        _ = new_model(model_input)  # Build model
        new_model.set_weights(model.get_weights())
        after_outputs = new_model(model_input, noise=noise)
        self.assert_outputs_same(after_outputs, outputs)
@unittest.skip(
    reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
def lowerCamelCase_ ( self : int ):
    """Intentionally skipped: ViTMAE's forward pass is non-deterministic (random mask)."""
    pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self : Optional[int] ):
    """Intentionally skipped: ViTMAE's forward pass is non-deterministic (random mask)."""
    pass
@slow
def lowerCamelCase_(self):
    """Smoke test: a pretrained checkpoint loads into the TF model.

    Fix: the loaded model was bound to a placeholder name while the assertion
    referenced a different (undefined) name.
    """
    model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
    self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration tests.

    Fixes: the def had been renamed away from `prepare_img`, which is the name
    the integration test calls; the opened image was bound to a placeholder
    while `return image` referenced an undefined name; the bogus `-> int`
    annotation is dropped (the function returns a PIL image).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """Integration test for TFViTMAEForPreTraining against a known logits slice.

    Fixes: the cached property is restored to `default_image_processor` (the
    name the test reads, and previously both methods shared one name so the
    property was shadowed); undefined locals restored; float literals
    normalized (values unchanged).
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def lowerCamelCase_(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 343 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original LUKE checkpoint to the HF format and sanity-check it.

    Fixes: the def is renamed back to `convert_luke_checkpoint` (the name the
    `__main__` block calls); the five parameters had been collapsed to one
    duplicate placeholder (a SyntaxError); undefined locals restored; the
    entity hidden-state shape check used an inverted `!=` comparison; literal
    kwargs (`use_entity_aware_attention=True`, `lstrip=False`, `strict=False`,
    `add_prefix_space=True`) restored per the upstream conversion script.

    Raises:
        ValueError: when the converted model's keys or outputs don't match
            the expected reference values.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"""Missing keys {", ".join(missing_keys)}. Expected only missing embeddings.position_ids""")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}""")

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # Fixed: this check previously used `!=`, which inverted the validation.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """Read a tab-separated entity vocab file and map each entity title to its
    0-based line index.

    Fixes: renamed back to `load_entity_vocab` (the name the conversion
    function calls); the per-line split and the dict assignment had been
    collapsed to placeholder bindings, leaving `entity_vocab` unused and the
    title discarded.
    """
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            # Each line is "<title>\t<count>"; only the title matters here.
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    # CLI entry point for the LUKE checkpoint conversion.
    # Fix: the parser and the parsed-args objects had been collapsed to a
    # single placeholder name, so `parser.add_argument` / `args.*` were
    # undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True if assigning `color` conflicts with no already-colored
    neighbour (adjacency given as a 0/1 row of the adjacency matrix).

    Fixes: renamed back to `valid_coloring` (called by `util_color`); the
    three parameters had been collapsed to one duplicate placeholder name.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))
def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Backtracking helper: try to color vertices `index..n-1` with at most
    `max_colors` colors, mutating `colored_vertices` in place.

    Fixes: renamed back to `util_color` (called by `color` and recursively);
    the four parameters had been collapsed to duplicate placeholders, leaving
    `graph` and `colored_vertices` undefined in the body.
    """
    # Base Case: every vertex has been colored successfully.
    if index == len(graph):
        return True

    # Recursive Step: try each color for the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list, max_colors: int) -> list:
    """Graph-coloring entry point: return one valid coloring (list of color
    indices per vertex) using at most `max_colors` colors, or [] if none exists.

    Fixes: renamed back to `color`; the two parameters had been collapsed to
    duplicate placeholders, leaving `graph` undefined in the body.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 343 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Fix: all four module-level bindings had been collapsed to one placeholder
# name; the tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum positional-embedding length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class SCREAMING_SNAKE_CASE_(PreTrainedTokenizer):
    """GPT-SW3 tokenizer backed by a SentencePiece model.

    Fixes relative to the corrupted version: base class restored to
    `PreTrainedTokenizer`; the four class attributes (which all shadowed one
    placeholder name) restored to the names the `PreTrainedTokenizer` API
    requires; method/parameter/local names restored from their use sites
    (`self.preprocess_text`, `self.non_printing_characters_re`, `logger`,
    `out_type=str`, `map(chr, ...)`, etc.).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored")
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # NOTE(review): the members of this set were distinct Unicode space /
        # zero-width characters in the upstream source; confirm they survived
        # the encoding of this file.
        # fmt : off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]")

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace, and apply NFC."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string unchanged (disables the default clean-up)."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens back to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        """Encode text directly with SentencePiece, bypassing the slow tokenizer path."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids directly with SentencePiece."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Format a chat as alternating 'User:'/'Bot:' turns and encode it."""
        all_responses = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(all_responses) + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt)
| 343 | import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Fix: all four module-level bindings had been collapsed to one placeholder
# name; the tokenizer class below reads `logger` and the three constants.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character mapping used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes map to code points
    starting at 256 so no byte ever maps to a whitespace/control character.

    Fixes: renamed back to `bytes_to_unicode` (called by the tokenizer's
    `__init__`); the locals `bs`, `cs` and the counter `n` had been collapsed
    to placeholders, leaving them undefined at their use sites.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple/sequence of symbols).

    Fixes: renamed back to `get_pairs` (called by `bpe`); `pairs` and
    `prev_char` had been collapsed to placeholders.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE_(PreTrainedTokenizer):
    """LED tokenizer (byte-level BPE, GPT-2 style) with LED-specific padding of
    `global_attention_mask`.

    Fixes relative to the corrupted version: base class restored to
    `PreTrainedTokenizer`; the four class attributes restored to the names the
    `PreTrainedTokenizer` API requires; duplicate/placeholder parameter names
    (a SyntaxError) restored; corrupted literal kwargs restored per the
    upstream LED tokenizer (`lstrip=True` for the mask token,
    `sort_keys=True`/`ensure_ascii=False` in `save_vocabulary`,
    `already_has_special_tokens=True` in the super delegation).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to one pre-tokenized token; memoized in `self.cache`."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest learned) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a 0/1 mask marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """LED (like RoBERTa/BART) does not use token type ids: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        # Byte-level BPE treats a leading space as part of the first token.
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then extend `global_attention_mask` with -1 to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 343 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_(BaseImageProcessor):
    """Image processor that resizes (256x256), center-crops (224x224),
    rescales and normalizes images.

    Fixes applied:
    - Base class restored to ``BaseImageProcessor`` (the original inherited
      from the undefined name ``__lowerCAmelCase``; ``BaseImageProcessor`` is
      imported above and otherwise unused).
    - Method names restored (``resize``/``center_crop``/``rescale``/
      ``normalize``/``preprocess``): the original named every method
      ``lowerCamelCase_`` so later defs shadowed earlier ones, while
      ``preprocess`` calls ``self.resize`` etc. by these names.
    - Parameter names restored (original signatures repeated one obfuscated
      name, a SyntaxError); the mapping follows the attribute assignments
      in ``__init__``.
    - Operator-precedence bug fixed in the ``do_resize`` validation
      (previously parsed as ``(do_resize and size is None) or resample is None``).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline (resize -> center-crop -> rescale ->
        normalize) over one image or a batch; any ``None`` argument falls
        back to the value stored on the processor."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Parenthesized: original `do_resize and size is None or resample is None`
        # raised even when do_resize was False and only resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 343 | import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# NOTE: the original bound every fixture tuple to the same obfuscated name,
# so only the last assignment survived; the test functions below reference
# SORTED_HANDS directly, so the distinct constant names are restored here.

# 40 hands ordered from weakest to strongest; used for random pairwise
# comparisons and the global sorting test.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (hand, other, expected result of hand.compare_with(other))
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

# (hand, expected result of _is_flush())
TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

# (hand, expected result of _is_straight())
TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

# (hand, expected result of _is_five_high_straight(), expected card values)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

# (hand, expected result of _is_same_kind())
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

# (hand, expected internal _hand_type score)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Pick two random indices into SORTED_HANDS and return
    ``(hand, other, expected)``, where ``expected`` is derived from the
    indices' order (hands are sorted weakest-to-strongest).

    Fixes: the original called ``len()`` on an undefined name (the same
    indices are then used to index ``SORTED_HANDS``, so that is the intended
    sequence), and its name is restored so the call in
    ``generate_random_hands`` resolves.
    """
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) maps to 0/1/2 -> Loss/Tie/Win.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """Yield ``number_of_hands`` random ``(hand, other, expected)`` triples.

    Name restored: the parametrized comparison test below calls
    ``generate_random_hands()``.
    """
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    """_is_flush() must match the TEST_FLUSH fixtures.

    Parameter names restored from the parametrize argnames string; the
    original repeated one obfuscated parameter name (a SyntaxError) while
    the body already referenced ``expected``.
    """
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    """_is_straight() must match the TEST_STRAIGHT fixtures."""
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    """_is_five_high_straight() and the resulting _card_values must match
    the fixtures (parameter names restored from the argnames string)."""
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    """_is_same_kind() must match the TEST_KIND fixtures."""
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    """The internal _hand_type score must match the TEST_TYPES fixtures."""
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    """compare_with() must return the expected Win/Tie/Loss verdict."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    """compare_with() must agree with the ordering of SORTED_HANDS for
    randomly sampled pairs."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    """Sorting a shuffled copy of all hands must reproduce SORTED_HANDS
    order (relies on PokerHand's comparison operators).

    Local names restored: the original assigned everything to one obfuscated
    name while the body already referenced ``poker_hands``.
    """
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """Five-high straights must be compared correctly when sorting.

    Local name restored (body referenced ``pokerhands``); the undefined
    ``reverse=`` argument is restored to ``True`` so the strongest hand
    comes first, matching the assertion on index 0.
    """
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """Repeated _is_five_high_straight() calls must keep returning True and
    must not mutate the card values after the first call.

    Local names restored (body referenced ``pokerhand``, ``expected`` and
    ``expected_card_values``, which the original left undefined).
    """
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """Project Euler problem 54: count Player 1 wins in poker_hands.txt.

    Names restored: the original computed the directory of an undefined
    name — ``__file__`` is the only sensible anchor for a test-relative
    data file — and assigned all locals to one obfuscated name while the
    body referenced ``answer``, ``player`` and ``output``.
    """
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(file_path) as file_hand:
        for line in file_hand:
            # Each line holds two 14-character hands separated by a space.
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 343 | 1 |
def lowercase(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``n``
    (Project Euler problem 1).

    Fixes: the original's ``elif a % 15 == 0: result -= a`` branch was
    unreachable dead code (any multiple of 15 is a multiple of 3, so the
    first test always captured it), and the ``__main__`` guard called the
    undefined name ``solution``. Behavior for all inputs is unchanged.

    >>> lowercase(10) == 23
    True
    """
    return sum(a for a in range(n) if a % 3 == 0 or a % 5 == 0)


if __name__ == "__main__":
    print(f"{lowercase() = }")
| 343 | import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """XLNet-style model configuration (vocabulary size, hidden sizes,
    attention settings, summary head, deprecated ``use_cache`` handling).

    Fixes applied:
    - Base class restored to ``PretrainedConfig`` (the original inherited
      from the undefined ``__lowerCAmelCase``; ``PretrainedConfig`` is the
      otherwise-unused import above).
    - Class attributes restored (``model_type`` etc. — the original bound
      all three to the same mangled name, so only the last survived).
    - ``__init__`` parameter names restored from the body, which already
      referenced ``vocab_size``, ``d_model``, …; the original repeated one
      obfuscated parameter name, which is a SyntaxError.
    - The ``max_position_embeddings`` property name restored: the setter
      decorator already referenced it.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the hyper-parameters; ``d_head`` is derived as
        ``d_model // n_head`` and validated against any explicit value."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            # Honour the legacy kwarg for backward compatibility.
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """This architecture has no fixed sequence-length limit; -1 signals
        'unbounded' to downstream code."""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
# Fixes: the original rebound every structure fragment to one obfuscated
# name and then passed the undefined `_import_structure` to `_LazyModule`;
# the canonical pattern (single dict, keyed updates, `sys.modules`
# replacement — grounded by the otherwise-unused `import sys`) is restored.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 343 | import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# NOTE(review): three distinct fixture paths are all bound to the same name,
# so only the last assignment survives. The test class below never references
# this name (it passes the undefined `lowerCamelCase_` instead), which points
# to mechanical renaming damage — the original distinct constant names should
# be restored from the upstream test file before running these tests.
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Tests for AutoFeatureExtractor: hub/local loading, error reporting,
    custom registration, and the trust_remote_code code paths.

    NOTE(review): many call sites below pass ``lowerCamelCase_`` and read
    names such as ``config_dict``/``feature_extractor`` that are never
    assigned (every assignment targets the single name ``UpperCamelCase``).
    This looks like mechanical renaming damage; confirm each argument against
    the upstream test file before relying on these tests.
    """
    def lowerCamelCase_ ( self : Any ):
        """Placeholder hook; the stored value is never read back."""
        UpperCamelCase = 0
    def lowerCamelCase_ ( self : str ):
        """Load a feature extractor from a hub model id."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        """Load a feature extractor from a local path (argument is the undefined name noted above)."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : int ):
        """config.json alone (without feature_extractor_type) must be enough to load locally."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
            # save in new folder
            model_config.save_pretrained(lowerCamelCase_ )
            config.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
            # make sure private variable is not incorrectly saved
            UpperCamelCase = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Load a feature extractor from a saved config file path."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """An invalid model identifier must raise with a helpful message."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
    def lowerCamelCase_ ( self : Dict ):
        """An invalid revision must raise."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
    def lowerCamelCase_ ( self : List[str] ):
        """A repo without preprocessor_config.json must raise."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Remote-code extractors require trust_remote_code=True and survive a save/reload round-trip."""
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Register a custom config/extractor pair and load it through the auto-API."""
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase_ ):
                AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCamelCase_ )
                UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
                self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def lowerCamelCase_ ( self : Any ):
        """A locally registered class wins by default; trust_remote_code=True loads from the Hub."""
        class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
            # Marker attribute checked below via `feature_extractor.is_local`.
            __lowerCAmelCase = True
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # If remote code is not set, the default is to use local
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 343 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_:
    """Helper that builds small ResNet configs and inputs for the TF model
    tests above.

    Fixes applied:
    - ``__init__`` parameter names restored from the body, which already
      referenced ``parent``, ``batch_size``, … (the original signature
      repeated one obfuscated name — a SyntaxError).
    - Method names restored: the original named every method
      ``lowerCamelCase_`` so later defs shadowed earlier ones, while the
      bodies call ``self.get_config()`` and ``self.prepare_config_and_inputs()``
      by their real names.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — fixture lists are read-only by convention here
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with labels only when
        self.use_labels is set."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ResNetConfig from the tester's hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Unpack prepare_config_and_inputs() into the (config, inputs_dict)
        shape expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common tests for TF ResNet. Several base-class tests are overridden or
    skipped because ResNet is a pure conv net and does not use input_ids,
    inputs_embeds, attention_mask or seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # No attentions, no token embeddings to prune/resize, no head masking.
    # NOTE(review): the original assigned all five flags to one mangled name;
    # these are the conventional common-test switches — confirm against the mixin.
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        # Exercise the standard config (de)serialization round-trips.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # ResNetConfig has no common text-model properties to verify.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # + 1 for the stem / embedding output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    Restored name: the integration test below calls ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Restored name: the test body reads ``self.default_image_processor``.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 343 | import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase( UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese(word):
    """Return 1 if every character of ``word`` is a CJK character, else 0.

    # word like '180' or '身高' or '神'
    """
    # Restored: the parameter was mangled while the body still read ``word``.
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collect the multi-character, all-CJK tokens from ``tokens``.

    Returns a list (deduplicated via a set, so order is unspecified).
    """
    # Restored: the parameter was mangled while the body still read ``tokens``.
    word_set = set()

    for token in tokens:
        # Single characters are not "words" for whole-word masking purposes.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix ``##`` onto BERT tokens that continue a known Chinese word.

    Scans ``bert_tokens`` left to right; whenever a run of tokens starting at
    the current position joins into a word from ``chinese_word_set`` (longest
    match first), every token after the first in that run is rewritten with a
    ``##`` WordPiece-continuation prefix. Mutates ``bert_tokens`` in place and
    returns it.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate first so shorter words cannot mask it.
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Compute whole-word reference positions for Chinese WWM pretraining.

    For every line, segments it with LTP to discover Chinese words, tokenizes
    it with the BERT tokenizer, and returns, per line, the indices of the
    ``##``-continuation subwords that belong to a Chinese whole word.
    """
    # Segment in batches of 100 lines to keep LTP calls bounded.
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    """Read input lines, compute whole-word ref positions, write JSON lines.

    For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext
    (https://github.com/ymcui/Chinese-BERT-wwm). If we want to fine-tune these
    models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    """
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # Restored names: the original bound the parser and parsed args to a
    # mangled identifier while the code below used ``parser`` and ``args``.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
| 343 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """Builds a tiny SwiftFormer config plus dummy inputs and asserts output
    shapes for the PyTorch SwiftFormer model classes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        # The original file declared every parameter with the same mangled
        # name (a SyntaxError); names are restored from the assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final stage downsamples 224 -> 7 (factor 32) with embed_dims[-1] channels.
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        # Also check the head without labels on fresh inputs.
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape
        expected by the common model tests."""
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common tests for SwiftFormer. Several base-class tests are overridden or
    skipped because SwiftFormer does not use input_ids, inputs_embeds,
    attention_mask or seq_length.
    """

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): the original assigned all five flags to one mangled name;
    # these are the conventional common-test switches — confirm against the mixin.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            # Zero out every initializer/scale knob, recursing into sub-configs,
            # so that surviving nonzero parameters point at missing init code.
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        # NOTE(review): the method name behind this skip was mangled; the skip
        # message matches the common `test_model_is_small` — confirm upstream.
        pass
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    Restored name: the integration test below calls ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained SwiftFormer checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Restored name: the test body reads ``self.default_image_processor``.
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 343 | import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds a tiny ViT config plus dummy inputs and asserts output shapes
    for the Flax ViT model classes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        # The original file declared every parameter with the same mangled
        # name (a SyntaxError); names are restored from the assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape
        expected by the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common tests for Flax ViT, including JIT compilation consistency."""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT must not change the number or shapes of the outputs.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 343 | 1 |
# Package initializer for the Shap-E pipelines: import the real
# implementations when torch + transformers are installed, otherwise fall
# back to dummy objects that raise a helpful error only at use time.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,  # NOTE(review): imported but never used below — upstream gates on a minimum transformers version; confirm whether that check was dropped intentionally
)


try:
    # Both backends are required for the real pipelines.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder class that raises an informative ImportError on use,
    # keeping `from ... import ShapEPipeline` importable without torch.
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    # Real exports: the pipelines plus the camera/renderer helpers they use.
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
| 343 | import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds a tiny random LiLT config plus matching inputs and provides
    ``create_and_check_*`` helpers that assert output shapes.

    Named ``LiltModelTester`` because the test class below instantiates it
    under exactly that name. All hyperparameters are deliberately small so
    the tests run fast on CPU.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, bbox, token_type_ids, input_mask,
        sequence_labels, token_labels)`` with random but *legal* values."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that each bbox is legal, i.e. x1 <= x2 and y1 <= y2:
        # swap the coordinate pair whenever it is out of order.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Build a small ``LiltConfig`` from this tester's hyperparameters."""
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        """Run the base model with progressively fewer optional inputs and
        check the last-hidden-state / pooler output shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        """Check logits shape of the token-classification head."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        """Check start/end logits shapes of the question-answering head."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common
        ``ModelTesterMixin`` machinery expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for LiLT, driven by ``LiltModelTester``."""

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original obfuscated file rebound one name for both of
    # these flags; ``fx_compatible``/``test_pruning`` match the upstream test —
    # confirm against the canonical transformers test file.
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped for LiLT (they require bbox inputs the
        # generic pipeline fixtures do not provide).
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained LiLT checkpoint."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        # Bug fix: the original used assertTrue(shape, expected), which treats
        # the expected tensor as a failure *message* and always passes.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 343 | 1 |
# NOTE(review): removed stray dataset-viewer UI text ("Subsets and Splits",
# "No community queries yet", ...) that was accidentally appended to this
# Python file; it was not valid Python and not part of the test suite.